From 00ddad305738f1c1a382b42db1c63fe43233c868 Mon Sep 17 00:00:00 2001 From: hriships Date: Tue, 20 Aug 2019 13:59:15 +0530 Subject: [PATCH 1/2] fixes 0.6.0 version compatibility issues The pipeline version 0.6.0 has introduced the following changes - Pipeline/PipelineParam was a string value; now it has changed to ArrayOrString - The SeedTestData method was accepting two params; now it has introduced `context` as a 3rd param, which essentially broke compatibility with existing pipeline APIs This patch fixes: - Pipeline/PipelineRun params now refer to the ArrayOrString type - The SeedTestData() utility fixes the context param issue by extracting the function call to one place, so in the future if the API changes, hopefully it needs to be fixed in only one place Fixes https://github.com/tektoncd/cli/issues/220 --- Makefile | 4 + go.mod | 11 +- go.sum | 19 + pkg/cmd/clustertask/list_test.go | 7 +- pkg/cmd/pipeline/describe_test.go | 13 +- pkg/cmd/pipeline/list_test.go | 13 +- pkg/cmd/pipeline/logs_test.go | 45 +- pkg/cmd/pipeline/start.go | 7 +- pkg/cmd/pipeline/start_test.go | 168 ++--- pkg/cmd/pipelineresource/describe_test.go | 13 +- pkg/cmd/pipelineresource/list_test.go | 15 +- pkg/cmd/pipelinerun/cancel_test.go | 9 +- pkg/cmd/pipelinerun/describe.go | 6 +- pkg/cmd/pipelinerun/describe_test.go | 86 ++- pkg/cmd/pipelinerun/list_test.go | 13 +- pkg/cmd/pipelinerun/log_test.go | 92 ++- pkg/cmd/task/list_test.go | 7 +- pkg/cmd/taskrun/list_test.go | 14 +- pkg/cmd/taskrun/logs_test.go | 40 +- pkg/formatted/k8s.go | 4 +- pkg/helper/pipelinerun/tracker_test.go | 59 +- pkg/helper/pods/container_test.go | 2 +- pkg/helper/pods/pod_test.go | 7 +- pkg/test/helpers.go | 7 + .../exporter/prometheus/.gitignore | 1 + .../exporter/prometheus/.travis.yml | 17 + .../exporter/prometheus}/LICENSE | 0 .../exporter/prometheus/Makefile | 95 +++ .../exporter/prometheus/README.md | 14 + .../exporter/prometheus/go.mod | 6 + .../exporter/prometheus/go.sum | 51 ++ .../exporter/prometheus/prometheus.go | 282 ++++----- 
.../exporter/prometheus/sanitize.go | 50 ++ vendor/github.com/golang/groupcache/LICENSE | 191 ++++++ .../github.com/golang/groupcache/lru/lru.go | 133 ++++ .../google/go-cmp/cmp/cmpopts/equate.go | 89 +++ .../google/go-cmp/cmp/cmpopts/ignore.go | 207 ++++++ .../google/go-cmp/cmp/cmpopts/sort.go | 147 +++++ .../go-cmp/cmp/cmpopts/struct_filter.go | 182 ++++++ .../google/go-cmp/cmp/cmpopts/xform.go | 35 + .../github.com/google/go-cmp/cmp/compare.go | 557 ++++++++-------- .../cmp/{unsafe_panic.go => export_panic.go} | 6 +- .../{unsafe_reflect.go => export_unsafe.go} | 8 +- .../go-cmp/cmp/internal/diff/debug_disable.go | 2 +- .../go-cmp/cmp/internal/diff/debug_enable.go | 4 +- .../google/go-cmp/cmp/internal/diff/diff.go | 31 +- .../google/go-cmp/cmp/internal/flags/flags.go | 9 + .../cmp/internal/flags/toolchain_legacy.go | 10 + .../cmp/internal/flags/toolchain_recent.go | 10 + .../go-cmp/cmp/internal/function/func.go | 64 +- .../go-cmp/cmp/internal/value/format.go | 277 -------- .../cmp/internal/value/pointer_purego.go | 23 + .../cmp/internal/value/pointer_unsafe.go | 26 + .../google/go-cmp/cmp/internal/value/sort.go | 11 +- .../google/go-cmp/cmp/internal/value/zero.go | 48 ++ .../github.com/google/go-cmp/cmp/options.go | 255 +++++--- vendor/github.com/google/go-cmp/cmp/path.go | 339 +++++----- vendor/github.com/google/go-cmp/cmp/report.go | 51 ++ .../google/go-cmp/cmp/report_compare.go | 296 +++++++++ .../google/go-cmp/cmp/report_reflect.go | 278 ++++++++ .../google/go-cmp/cmp/report_slices.go | 333 ++++++++++ .../google/go-cmp/cmp/report_text.go | 387 ++++++++++++ .../google/go-cmp/cmp/report_value.go | 121 ++++ .../github.com/google/go-cmp/cmp/reporter.go | 53 -- vendor/github.com/google/uuid/.travis.yml | 9 + vendor/github.com/google/uuid/CONTRIBUTING.md | 10 + vendor/github.com/google/uuid/CONTRIBUTORS | 9 + vendor/github.com/google/uuid/LICENSE | 27 + vendor/github.com/google/uuid/README.md | 19 + vendor/github.com/google/uuid/dce.go | 80 +++ 
vendor/github.com/google/uuid/doc.go | 12 + vendor/github.com/google/uuid/go.mod | 1 + vendor/github.com/google/uuid/hash.go | 53 ++ vendor/github.com/google/uuid/marshal.go | 37 ++ vendor/github.com/google/uuid/node.go | 90 +++ vendor/github.com/google/uuid/node_js.go | 12 + vendor/github.com/google/uuid/node_net.go | 33 + vendor/github.com/google/uuid/sql.go | 59 ++ vendor/github.com/google/uuid/time.go | 123 ++++ vendor/github.com/google/uuid/util.go | 43 ++ vendor/github.com/google/uuid/uuid.go | 245 +++++++ vendor/github.com/google/uuid/version1.go | 44 ++ vendor/github.com/google/uuid/version4.go | 38 ++ vendor/github.com/knative/pkg/apis/OWNERS | 6 - .../github.com/knative/pkg/apis/contexts.go | 77 --- .../github.com/knative/pkg/controller/OWNERS | 6 - vendor/github.com/knative/pkg/logging/OWNERS | 6 - vendor/github.com/knative/pkg/test/OWNERS | 7 - .../pipeline/pkg/apis/config/default.go | 4 +- .../pipeline/pkg/apis/config/store.go | 4 +- .../pkg/apis/config/zz_generated.deepcopy.go | 2 +- .../pipeline/pkg/apis/pipeline/register.go | 25 +- .../apis/pipeline/v1alpha1/artifact_bucket.go | 24 +- .../apis/pipeline/v1alpha1/artifact_pvc.go | 39 +- .../pipeline/v1alpha1/build_gcs_resource.go | 53 +- .../pipeline/v1alpha1/cloud_event_resource.go | 88 +++ .../pipeline/v1alpha1/cluster_resource.go | 20 +- .../v1alpha1/cluster_task_defaults.go | 25 + .../pipeline/v1alpha1/cluster_task_types.go | 34 +- .../v1alpha1/cluster_task_validation.go | 2 +- .../pipeline/v1alpha1/condition_defaults.go | 26 + .../apis/pipeline/v1alpha1/condition_types.go | 108 ++++ .../pipeline/v1alpha1/condition_validation.go | 42 ++ .../pkg/apis/pipeline/v1alpha1/contexts.go | 24 + .../pkg/apis/pipeline/v1alpha1/doc.go | 2 +- .../apis/pipeline/v1alpha1/gcs_resource.go | 55 +- .../apis/pipeline/v1alpha1/git_resource.go | 28 +- .../apis/pipeline/v1alpha1/image_resource.go | 19 +- .../pipeline/v1alpha1}/merge.go | 45 +- .../pipeline/v1alpha1/metadata_validation.go | 5 +- 
.../pkg/apis/pipeline/v1alpha1/param_types.go | 95 ++- .../pipeline/v1alpha1/pipeline_defaults.go | 5 +- .../apis/pipeline/v1alpha1/pipeline_types.go | 19 +- .../pipeline/v1alpha1/pipeline_validation.go | 67 +- .../v1alpha1/pipelineresource_validation.go | 2 +- .../pipeline/v1alpha1/pipelinerun_defaults.go | 13 +- .../pipeline/v1alpha1/pipelinerun_types.go | 37 +- .../v1alpha1/pipelinerun_validation.go | 6 +- .../pkg/apis/pipeline/v1alpha1/pod.go | 62 ++ .../v1alpha1/pull_request_resource.go | 29 +- .../pkg/apis/pipeline/v1alpha1/register.go | 2 + .../pipeline/v1alpha1/resource_defaults.go | 2 +- .../apis/pipeline/v1alpha1/resource_types.go | 20 +- .../apis/pipeline/v1alpha1/result_types.go | 4 +- .../pipeline/v1alpha1/secret_volume_mount.go | 2 +- .../pipeline/v1alpha1/step_replacements.go | 67 ++ .../pipeline/v1alpha1/storage_resource.go | 33 +- .../apis/pipeline/v1alpha1/substitution.go | 156 +++++ .../apis/pipeline/v1alpha1/task_defaults.go | 11 +- .../apis/pipeline/v1alpha1/task_interface.go | 2 +- .../pkg/apis/pipeline/v1alpha1/task_types.go | 15 +- .../apis/pipeline/v1alpha1/task_validation.go | 118 +++- .../pipeline/v1alpha1/taskrun_defaults.go | 16 +- .../apis/pipeline/v1alpha1/taskrun_types.go | 76 ++- .../pipeline/v1alpha1/taskrun_validation.go | 18 +- .../v1alpha1/zz_generated.deepcopy.go | 596 +++++++++++++----- .../pkg/artifacts/artifacts_storage.go | 23 +- .../client/clientset/versioned/clientset.go | 3 + .../pkg/client/clientset/versioned/doc.go | 3 + .../versioned/fake/clientset_generated.go | 3 + .../client/clientset/versioned/fake/doc.go | 3 + .../clientset/versioned/fake/register.go | 7 +- .../client/clientset/versioned/scheme/doc.go | 3 + .../clientset/versioned/scheme/register.go | 7 +- .../typed/pipeline/v1alpha1/clustertask.go | 3 + .../typed/pipeline/v1alpha1/condition.go | 157 +++++ .../versioned/typed/pipeline/v1alpha1/doc.go | 3 + .../typed/pipeline/v1alpha1/fake/doc.go | 3 + .../v1alpha1/fake/fake_clustertask.go | 3 + 
.../pipeline/v1alpha1/fake/fake_condition.go | 128 ++++ .../pipeline/v1alpha1/fake/fake_pipeline.go | 3 + .../v1alpha1/fake/fake_pipeline_client.go | 7 + .../v1alpha1/fake/fake_pipelineresource.go | 3 + .../v1alpha1/fake/fake_pipelinerun.go | 3 + .../typed/pipeline/v1alpha1/fake/fake_task.go | 3 + .../pipeline/v1alpha1/fake/fake_taskrun.go | 3 + .../pipeline/v1alpha1/generated_expansion.go | 5 + .../typed/pipeline/v1alpha1/pipeline.go | 3 + .../pipeline/v1alpha1/pipeline_client.go | 8 + .../pipeline/v1alpha1/pipelineresource.go | 3 + .../typed/pipeline/v1alpha1/pipelinerun.go | 3 + .../versioned/typed/pipeline/v1alpha1/task.go | 3 + .../typed/pipeline/v1alpha1/taskrun.go | 3 + .../informers/externalversions/factory.go | 3 + .../informers/externalversions/generic.go | 5 + .../internalinterfaces/factory_interfaces.go | 3 + .../externalversions/pipeline/interface.go | 3 + .../pipeline/v1alpha1/clustertask.go | 9 +- .../pipeline/v1alpha1/condition.go | 89 +++ .../pipeline/v1alpha1/interface.go | 10 + .../pipeline/v1alpha1/pipeline.go | 9 +- .../pipeline/v1alpha1/pipelineresource.go | 9 +- .../pipeline/v1alpha1/pipelinerun.go | 9 +- .../pipeline/v1alpha1/task.go | 9 +- .../pipeline/v1alpha1/taskrun.go | 9 +- .../pkg/client/injection/client/client.go | 49 ++ .../pkg/client/injection/client/fake/fake.go | 54 ++ .../informers/pipeline/factory/fake/fake.go | 41 ++ .../pipeline/factory/pipelinefactory.go | 52 ++ .../v1alpha1/clustertask/clustertask.go | 52 ++ .../v1alpha1/clustertask/fake/fake.go | 40 ++ .../pipeline/v1alpha1/condition/condition.go | 52 ++ .../pipeline/v1alpha1/condition/fake/fake.go | 40 ++ .../pipeline/v1alpha1/pipeline/fake/fake.go | 40 ++ .../pipeline/v1alpha1/pipeline/pipeline.go | 52 ++ .../v1alpha1/pipelineresource/fake/fake.go | 40 ++ .../pipelineresource/pipelineresource.go | 52 ++ .../v1alpha1/pipelinerun/fake/fake.go | 40 ++ .../v1alpha1/pipelinerun/pipelinerun.go | 52 ++ .../pipeline/v1alpha1/task/fake/fake.go | 40 ++ 
.../informers/pipeline/v1alpha1/task/task.go | 52 ++ .../pipeline/v1alpha1/taskrun/fake/fake.go | 40 ++ .../pipeline/v1alpha1/taskrun/taskrun.go | 52 ++ .../listers/pipeline/v1alpha1/clustertask.go | 3 + .../listers/pipeline/v1alpha1/condition.go | 94 +++ .../pipeline/v1alpha1/expansion_generated.go | 11 + .../listers/pipeline/v1alpha1/pipeline.go | 3 + .../pipeline/v1alpha1/pipelineresource.go | 3 + .../listers/pipeline/v1alpha1/pipelinerun.go | 3 + .../client/listers/pipeline/v1alpha1/task.go | 3 + .../listers/pipeline/v1alpha1/taskrun.go | 3 + .../tektoncd/pipeline/pkg/list/diff.go | 2 +- .../v1alpha1/pipelinerun/resources/apply.go | 68 +- .../resources/conditionresolution.go | 134 ++++ .../resources/pipelinerunresolution.go | 228 +++++-- .../pipelinerun/resources/validate_params.go | 47 ++ .../v1alpha1/taskrun/entrypoint/entrypoint.go | 34 +- .../v1alpha1/taskrun/resources/apply.go | 113 ++-- .../taskrun/resources/image_exporter.go | 18 +- .../taskrun/resources/input_resources.go | 92 +-- .../taskrun/resources/output_resource.go | 95 +-- .../v1alpha1/taskrun/resources/pod.go | 124 ++-- .../resources/taskresourceresolution.go | 2 +- .../v1alpha1/taskrun/resources/taskspec.go | 2 +- .../pipeline/pkg/templating/templating.go | 71 --- .../github.com/tektoncd/pipeline/test/adoc.go | 5 +- .../tektoncd/pipeline/test/build_logs.go | 2 +- .../tektoncd/pipeline/test/builder/README.md | 2 +- .../pipeline/test/builder/condition.go | 84 +++ .../pipeline/test/builder/container.go | 3 + .../tektoncd/pipeline/test/builder/doc.go | 3 + .../pipeline/test/builder/owner_reference.go | 3 + .../tektoncd/pipeline/test/builder/param.go | 50 ++ .../pipeline/test/builder/pipeline.go | 90 ++- .../tektoncd/pipeline/test/builder/pod.go | 3 + .../tektoncd/pipeline/test/builder/step.go | 136 ++++ .../tektoncd/pipeline/test/builder/task.go | 69 +- .../tektoncd/pipeline/test/clients.go | 9 +- .../tektoncd/pipeline/test/controller.go | 110 ++-- .../tektoncd/pipeline/test/e2e-common.sh | 2 +- 
.../tektoncd/pipeline/test/secret.go | 7 +- .../github.com/tektoncd/pipeline/test/wait.go | 4 +- vendor/go.opencensus.io/Makefile | 3 +- vendor/go.opencensus.io/exemplar/exemplar.go | 79 --- vendor/go.opencensus.io/go.mod | 5 +- vendor/go.opencensus.io/go.sum | 112 +--- .../go.opencensus.io/metric/metricdata/doc.go | 19 + .../metric/metricdata/exemplar.go | 38 ++ .../metric/metricdata/label.go | 35 + .../metric/metricdata/metric.go | 46 ++ .../metric/metricdata/point.go | 193 ++++++ .../metric/metricdata/type_string.go | 16 + .../exemplar.go => metric/metricdata/unit.go} | 36 +- .../metric/metricexport/doc.go | 19 + .../metric/metricexport/export.go | 26 + .../metric/metricexport/reader.go | 187 ++++++ .../metric/metricproducer/manager.go | 78 +++ .../metric/metricproducer/producer.go | 28 + vendor/go.opencensus.io/opencensus.go | 2 +- .../plugin/ocgrpc/stats_common.go | 53 +- .../plugin/ochttp/client_stats.go | 2 +- .../go.opencensus.io/plugin/ochttp/server.go | 9 + .../go.opencensus.io/plugin/ochttp/trace.go | 17 +- .../plugin/ochttp/wrapped_body.go | 44 ++ vendor/go.opencensus.io/resource/resource.go | 164 +++++ .../go.opencensus.io/stats/internal/record.go | 2 +- vendor/go.opencensus.io/stats/record.go | 88 ++- .../stats/view/aggregation_data.go | 138 ++-- .../go.opencensus.io/stats/view/collector.go | 7 +- vendor/go.opencensus.io/stats/view/view.go | 19 +- .../stats/view/view_to_metric.go | 140 ++++ vendor/go.opencensus.io/stats/view/worker.go | 59 +- .../stats/view/worker_commands.go | 17 +- vendor/go.opencensus.io/tag/context.go | 24 - vendor/go.opencensus.io/tag/map.go | 66 +- vendor/go.opencensus.io/tag/map_codec.go | 12 +- vendor/go.opencensus.io/tag/metadata.go | 52 ++ vendor/go.opencensus.io/tag/profile_19.go | 2 +- vendor/go.uber.org/zap/internal/ztest/doc.go | 24 + .../go.uber.org/zap/internal/ztest/timeout.go | 59 ++ .../go.uber.org/zap/internal/ztest/writer.go | 96 +++ vendor/go.uber.org/zap/zaptest/doc.go | 22 + 
vendor/go.uber.org/zap/zaptest/logger.go | 140 ++++ vendor/go.uber.org/zap/zaptest/testingt.go | 47 ++ vendor/go.uber.org/zap/zaptest/timeout.go | 45 ++ vendor/go.uber.org/zap/zaptest/writer.go | 44 ++ vendor/k8s.io/client-go/tools/record/OWNERS | 27 + vendor/k8s.io/client-go/tools/record/doc.go | 18 + vendor/k8s.io/client-go/tools/record/event.go | 322 ++++++++++ .../client-go/tools/record/events_cache.go | 462 ++++++++++++++ vendor/k8s.io/client-go/tools/record/fake.go | 58 ++ vendor/knative.dev/pkg/LICENSE | 201 ++++++ .../metrics => knative.dev/pkg/apis}/OWNERS | 3 +- .../pkg/apis/condition_set.go | 0 .../pkg/apis/condition_types.go | 1 + vendor/knative.dev/pkg/apis/contexts.go | 182 ++++++ vendor/knative.dev/pkg/apis/deprecated.go | 180 ++++++ .../knative => knative.dev}/pkg/apis/doc.go | 0 .../pkg/apis/duck}/OWNERS | 3 +- .../pkg/apis/duck/cached.go | 0 .../pkg/apis/duck/doc.go | 0 .../pkg/apis/duck/enqueue.go | 0 .../pkg/apis/duck/interface.go | 0 .../pkg/apis/duck/patch.go | 0 .../pkg/apis/duck/proxy.go | 0 .../pkg/apis/duck/register.go | 0 .../pkg/apis/duck/typed.go | 64 +- .../pkg/apis/duck/unstructured.go | 0 .../apis/duck/v1beta1/addressable_types.go | 97 +++ .../pkg/apis/duck/v1beta1/doc.go | 0 .../pkg/apis/duck/v1beta1/register.go | 4 +- .../pkg/apis/duck/v1beta1/status_types.go | 5 +- .../duck/v1beta1/zz_generated.deepcopy.go | 103 +++ .../pkg/apis/duck/verify.go | 2 +- .../pkg/apis/field_error.go | 26 + .../pkg/apis/interfaces.go | 7 + .../pkg/apis/kind2resource.go | 0 .../pkg/apis/metadata_validation.go | 0 vendor/knative.dev/pkg/apis/url.go | 73 +++ .../pkg/apis/volatile_time.go | 1 + .../pkg/apis/zz_generated.deepcopy.go | 25 + .../pkg/changeset/commit.go | 0 .../pkg/changeset/doc.go | 0 .../duck => knative.dev/pkg/configmap}/OWNERS | 3 +- .../pkg/configmap/doc.go | 0 .../pkg/configmap/filter.go | 0 .../pkg/configmap/informed_watcher.go | 67 +- .../pkg/configmap/load.go | 0 .../pkg/configmap/manual_watcher.go | 1 - 
.../pkg/configmap/static_watcher.go | 0 .../pkg/configmap/store.go | 5 +- .../pkg/configmap/watcher.go | 13 +- vendor/knative.dev/pkg/controller/OWNERS | 4 + .../pkg/controller/controller.go | 105 ++- .../pkg/controller/helper.go | 17 +- .../pkg/controller/stats_reporter.go | 13 +- .../pkg/injection}/OWNERS | 2 +- vendor/knative.dev/pkg/injection/README.md | 218 +++++++ vendor/knative.dev/pkg/injection/clients.go | 42 ++ .../injection/clients/kubeclient/fake/fake.go | 53 ++ .../clients/kubeclient/kubeclient.go | 49 ++ vendor/knative.dev/pkg/injection/doc.go | 68 ++ vendor/knative.dev/pkg/injection/factories.go | 40 ++ vendor/knative.dev/pkg/injection/informers.go | 68 ++ .../kubeinformers/corev1/pod/fake/fake.go | 38 ++ .../informers/kubeinformers/corev1/pod/pod.go | 52 ++ .../kubeinformers/factory/factory.go | 52 ++ .../kubeinformers/factory/fake/fake.go | 40 ++ vendor/knative.dev/pkg/injection/interface.go | 84 +++ vendor/knative.dev/pkg/kmeta/OWNERS | 4 + .../pkg/kmeta/accessor.go | 0 .../knative => knative.dev}/pkg/kmeta/doc.go | 0 .../pkg/kmeta/labels.go | 0 vendor/knative.dev/pkg/kmeta/names.go | 41 ++ .../pkg/kmeta/owner_references.go | 0 .../pkg/kmeta/ownerrefable_accessor.go | 25 + .../knative => knative.dev}/pkg/kmp/diff.go | 27 + .../knative => knative.dev}/pkg/kmp/doc.go | 0 vendor/knative.dev/pkg/kmp/reporters.go | 148 +++++ vendor/knative.dev/pkg/logging/OWNERS | 4 + .../pkg/logging/config.go | 53 +- .../pkg/logging/logger.go | 0 .../pkg/logging/logkey/constants.go | 3 + .../knative.dev/pkg/logging/testing/util.go | 68 ++ .../pkg/logging/zz_generated.deepcopy.go | 0 vendor/knative.dev/pkg/metrics/OWNERS | 4 + .../pkg/metrics/config.go | 43 +- .../pkg/metrics/doc.go | 0 .../pkg/metrics/exporter.go | 21 + .../pkg/metrics/gcp_metadata.go | 4 +- .../pkg/metrics/metricskey/constants.go | 0 .../pkg/metrics/monitored_resources.go | 2 +- .../pkg/metrics/prometheus_exporter.go | 2 +- .../pkg/metrics/record.go | 13 +- .../pkg/metrics/stackdriver_exporter.go | 2 
+- .../pkg/reconciler/testing/actions.go | 76 +++ .../pkg/reconciler/testing/clock.go | 29 + .../pkg/reconciler/testing/context.go | 35 + .../pkg/reconciler/testing/events.go | 44 ++ .../testing/generate_name_reactor.go | 86 +++ .../pkg/reconciler/testing/hooks.go | 183 ++++++ .../pkg/reconciler/testing/reactions.go | 66 ++ .../pkg/reconciler/testing/sorter.go | 93 +++ .../pkg/reconciler/testing/stats.go | 40 ++ .../pkg/reconciler/testing/table.go | 365 +++++++++++ .../pkg/reconciler/testing/tracker.go | 34 + .../pkg/reconciler/testing/util.go | 85 +++ vendor/knative.dev/pkg/system/clock.go | 32 + vendor/knative.dev/pkg/system/names.go | 52 ++ .../knative.dev/pkg/system/testing/names.go | 27 + vendor/knative.dev/pkg/test/OWNERS | 10 + .../pkg/test/README.md | 9 +- .../pkg/test/cleanup.go | 2 +- .../pkg/test/clients.go | 4 +- .../knative => knative.dev}/pkg/test/crd.go | 0 .../pkg/test/e2e_flags.go | 0 .../pkg/test/ingress/ingress.go | 0 .../pkg/test/kube_checks.go | 21 +- .../pkg/test/logging/logging.go | 2 +- .../pkg/test/monitoring/doc.go | 0 .../pkg/test/monitoring/monitoring.go | 2 +- .../pkg/test/presubmit-tests.sh | 4 + .../pkg/test/request.go | 33 +- .../pkg/test/spoof/error_checks.go | 0 .../pkg/test/spoof/spoof.go | 8 +- .../pkg/test/zipkin/doc.go | 0 .../pkg/test/zipkin/util.go | 4 +- vendor/knative.dev/pkg/tracker/doc.go | 21 + vendor/knative.dev/pkg/tracker/enqueue.go | 169 +++++ vendor/knative.dev/pkg/tracker/interface.go | 33 + vendor/modules.txt | 127 ++-- 401 files changed, 16054 insertions(+), 3012 deletions(-) create mode 100644 vendor/contrib.go.opencensus.io/exporter/prometheus/.gitignore create mode 100644 vendor/contrib.go.opencensus.io/exporter/prometheus/.travis.yml rename vendor/{github.com/knative/pkg => contrib.go.opencensus.io/exporter/prometheus}/LICENSE (100%) create mode 100644 vendor/contrib.go.opencensus.io/exporter/prometheus/Makefile create mode 100644 vendor/contrib.go.opencensus.io/exporter/prometheus/README.md create mode 
100644 vendor/contrib.go.opencensus.io/exporter/prometheus/go.mod create mode 100644 vendor/contrib.go.opencensus.io/exporter/prometheus/go.sum rename vendor/{go.opencensus.io => contrib.go.opencensus.io}/exporter/prometheus/prometheus.go (50%) create mode 100644 vendor/contrib.go.opencensus.io/exporter/prometheus/sanitize.go create mode 100644 vendor/github.com/golang/groupcache/LICENSE create mode 100644 vendor/github.com/golang/groupcache/lru/lru.go create mode 100644 vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go create mode 100644 vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go create mode 100644 vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go create mode 100644 vendor/github.com/google/go-cmp/cmp/cmpopts/struct_filter.go create mode 100644 vendor/github.com/google/go-cmp/cmp/cmpopts/xform.go rename vendor/github.com/google/go-cmp/cmp/{unsafe_panic.go => export_panic.go} (60%) rename vendor/github.com/google/go-cmp/cmp/{unsafe_reflect.go => export_unsafe.go} (64%) create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/format.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/zero.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_compare.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_reflect.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_slices.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_text.go create mode 100644 
vendor/github.com/google/go-cmp/cmp/report_value.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/reporter.go create mode 100644 vendor/github.com/google/uuid/.travis.yml create mode 100644 vendor/github.com/google/uuid/CONTRIBUTING.md create mode 100644 vendor/github.com/google/uuid/CONTRIBUTORS create mode 100644 vendor/github.com/google/uuid/LICENSE create mode 100644 vendor/github.com/google/uuid/README.md create mode 100644 vendor/github.com/google/uuid/dce.go create mode 100644 vendor/github.com/google/uuid/doc.go create mode 100644 vendor/github.com/google/uuid/go.mod create mode 100644 vendor/github.com/google/uuid/hash.go create mode 100644 vendor/github.com/google/uuid/marshal.go create mode 100644 vendor/github.com/google/uuid/node.go create mode 100644 vendor/github.com/google/uuid/node_js.go create mode 100644 vendor/github.com/google/uuid/node_net.go create mode 100644 vendor/github.com/google/uuid/sql.go create mode 100644 vendor/github.com/google/uuid/time.go create mode 100644 vendor/github.com/google/uuid/util.go create mode 100644 vendor/github.com/google/uuid/uuid.go create mode 100644 vendor/github.com/google/uuid/version1.go create mode 100644 vendor/github.com/google/uuid/version4.go delete mode 100644 vendor/github.com/knative/pkg/apis/OWNERS delete mode 100644 vendor/github.com/knative/pkg/apis/contexts.go delete mode 100644 vendor/github.com/knative/pkg/controller/OWNERS delete mode 100644 vendor/github.com/knative/pkg/logging/OWNERS delete mode 100644 vendor/github.com/knative/pkg/test/OWNERS create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cloud_event_resource.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_defaults.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/condition_defaults.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/condition_types.go create mode 100644 
vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/condition_validation.go rename vendor/github.com/tektoncd/pipeline/pkg/{merge => apis/pipeline/v1alpha1}/merge.go (57%) create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pod.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/step_replacements.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/substitution.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/condition.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_condition.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/condition.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/injection/client/client.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/injection/client/fake/fake.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/factory/fake/fake.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/factory/pipelinefactory.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/clustertask/clustertask.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/clustertask/fake/fake.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/condition/condition.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/condition/fake/fake.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipeline/fake/fake.go create mode 100644 
vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipeline/pipeline.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipelineresource/fake/fake.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipelineresource/pipelineresource.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipelinerun/fake/fake.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipelinerun/pipelinerun.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/task/fake/fake.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/task/task.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/taskrun/fake/fake.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/taskrun/taskrun.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/condition.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/pipelinerun/resources/conditionresolution.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/pipelinerun/resources/validate_params.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/templating/templating.go create mode 100644 vendor/github.com/tektoncd/pipeline/test/builder/condition.go create mode 100644 vendor/github.com/tektoncd/pipeline/test/builder/param.go create mode 100644 vendor/github.com/tektoncd/pipeline/test/builder/step.go delete mode 100644 vendor/go.opencensus.io/exemplar/exemplar.go create mode 100644 vendor/go.opencensus.io/metric/metricdata/doc.go create mode 100644 vendor/go.opencensus.io/metric/metricdata/exemplar.go 
create mode 100644 vendor/go.opencensus.io/metric/metricdata/label.go create mode 100644 vendor/go.opencensus.io/metric/metricdata/metric.go create mode 100644 vendor/go.opencensus.io/metric/metricdata/point.go create mode 100644 vendor/go.opencensus.io/metric/metricdata/type_string.go rename vendor/go.opencensus.io/{trace/exemplar.go => metric/metricdata/unit.go} (52%) create mode 100644 vendor/go.opencensus.io/metric/metricexport/doc.go create mode 100644 vendor/go.opencensus.io/metric/metricexport/export.go create mode 100644 vendor/go.opencensus.io/metric/metricexport/reader.go create mode 100644 vendor/go.opencensus.io/metric/metricproducer/manager.go create mode 100644 vendor/go.opencensus.io/metric/metricproducer/producer.go create mode 100644 vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go create mode 100644 vendor/go.opencensus.io/resource/resource.go create mode 100644 vendor/go.opencensus.io/stats/view/view_to_metric.go create mode 100644 vendor/go.opencensus.io/tag/metadata.go create mode 100644 vendor/go.uber.org/zap/internal/ztest/doc.go create mode 100644 vendor/go.uber.org/zap/internal/ztest/timeout.go create mode 100644 vendor/go.uber.org/zap/internal/ztest/writer.go create mode 100644 vendor/go.uber.org/zap/zaptest/doc.go create mode 100644 vendor/go.uber.org/zap/zaptest/logger.go create mode 100644 vendor/go.uber.org/zap/zaptest/testingt.go create mode 100644 vendor/go.uber.org/zap/zaptest/timeout.go create mode 100644 vendor/go.uber.org/zap/zaptest/writer.go create mode 100644 vendor/k8s.io/client-go/tools/record/OWNERS create mode 100644 vendor/k8s.io/client-go/tools/record/doc.go create mode 100644 vendor/k8s.io/client-go/tools/record/event.go create mode 100644 vendor/k8s.io/client-go/tools/record/events_cache.go create mode 100644 vendor/k8s.io/client-go/tools/record/fake.go create mode 100644 vendor/knative.dev/pkg/LICENSE rename vendor/{github.com/knative/pkg/metrics => knative.dev/pkg/apis}/OWNERS (77%) rename 
vendor/{github.com/knative => knative.dev}/pkg/apis/condition_set.go (100%) rename vendor/{github.com/knative => knative.dev}/pkg/apis/condition_types.go (99%) create mode 100644 vendor/knative.dev/pkg/apis/contexts.go create mode 100644 vendor/knative.dev/pkg/apis/deprecated.go rename vendor/{github.com/knative => knative.dev}/pkg/apis/doc.go (100%) rename vendor/{github.com/knative/pkg/kmeta => knative.dev/pkg/apis/duck}/OWNERS (76%) rename vendor/{github.com/knative => knative.dev}/pkg/apis/duck/cached.go (100%) rename vendor/{github.com/knative => knative.dev}/pkg/apis/duck/doc.go (100%) rename vendor/{github.com/knative => knative.dev}/pkg/apis/duck/enqueue.go (100%) rename vendor/{github.com/knative => knative.dev}/pkg/apis/duck/interface.go (100%) rename vendor/{github.com/knative => knative.dev}/pkg/apis/duck/patch.go (100%) rename vendor/{github.com/knative => knative.dev}/pkg/apis/duck/proxy.go (100%) rename vendor/{github.com/knative => knative.dev}/pkg/apis/duck/register.go (100%) rename vendor/{github.com/knative => knative.dev}/pkg/apis/duck/typed.go (75%) rename vendor/{github.com/knative => knative.dev}/pkg/apis/duck/unstructured.go (100%) create mode 100644 vendor/knative.dev/pkg/apis/duck/v1beta1/addressable_types.go rename vendor/{github.com/knative => knative.dev}/pkg/apis/duck/v1beta1/doc.go (100%) rename vendor/{github.com/knative => knative.dev}/pkg/apis/duck/v1beta1/register.go (94%) rename vendor/{github.com/knative => knative.dev}/pkg/apis/duck/v1beta1/status_types.go (98%) rename vendor/{github.com/knative => knative.dev}/pkg/apis/duck/v1beta1/zz_generated.deepcopy.go (54%) rename vendor/{github.com/knative => knative.dev}/pkg/apis/duck/verify.go (99%) rename vendor/{github.com/knative => knative.dev}/pkg/apis/field_error.go (91%) rename vendor/{github.com/knative => knative.dev}/pkg/apis/interfaces.go (90%) rename vendor/{github.com/knative => knative.dev}/pkg/apis/kind2resource.go (100%) rename vendor/{github.com/knative => 
knative.dev}/pkg/apis/metadata_validation.go (100%) create mode 100644 vendor/knative.dev/pkg/apis/url.go rename vendor/{github.com/knative => knative.dev}/pkg/apis/volatile_time.go (97%) rename vendor/{github.com/knative => knative.dev}/pkg/apis/zz_generated.deepcopy.go (84%) rename vendor/{github.com/knative => knative.dev}/pkg/changeset/commit.go (100%) rename vendor/{github.com/knative => knative.dev}/pkg/changeset/doc.go (100%) rename vendor/{github.com/knative/pkg/apis/duck => knative.dev/pkg/configmap}/OWNERS (75%) rename vendor/{github.com/knative => knative.dev}/pkg/configmap/doc.go (100%) rename vendor/{github.com/knative => knative.dev}/pkg/configmap/filter.go (100%) rename vendor/{github.com/knative => knative.dev}/pkg/configmap/informed_watcher.go (58%) rename vendor/{github.com/knative => knative.dev}/pkg/configmap/load.go (100%) rename vendor/{github.com/knative => knative.dev}/pkg/configmap/manual_watcher.go (99%) rename vendor/{github.com/knative => knative.dev}/pkg/configmap/static_watcher.go (100%) rename vendor/{github.com/knative => knative.dev}/pkg/configmap/store.go (96%) rename vendor/{github.com/knative => knative.dev}/pkg/configmap/watcher.go (70%) create mode 100644 vendor/knative.dev/pkg/controller/OWNERS rename vendor/{github.com/knative => knative.dev}/pkg/controller/controller.go (76%) rename vendor/{github.com/knative => knative.dev}/pkg/controller/helper.go (70%) rename vendor/{github.com/knative => knative.dev}/pkg/controller/stats_reporter.go (91%) rename vendor/{github.com/knative/pkg/configmap => knative.dev/pkg/injection}/OWNERS (88%) create mode 100644 vendor/knative.dev/pkg/injection/README.md create mode 100644 vendor/knative.dev/pkg/injection/clients.go create mode 100644 vendor/knative.dev/pkg/injection/clients/kubeclient/fake/fake.go create mode 100644 vendor/knative.dev/pkg/injection/clients/kubeclient/kubeclient.go create mode 100644 vendor/knative.dev/pkg/injection/doc.go create mode 100644 
vendor/knative.dev/pkg/injection/factories.go create mode 100644 vendor/knative.dev/pkg/injection/informers.go create mode 100644 vendor/knative.dev/pkg/injection/informers/kubeinformers/corev1/pod/fake/fake.go create mode 100644 vendor/knative.dev/pkg/injection/informers/kubeinformers/corev1/pod/pod.go create mode 100644 vendor/knative.dev/pkg/injection/informers/kubeinformers/factory/factory.go create mode 100644 vendor/knative.dev/pkg/injection/informers/kubeinformers/factory/fake/fake.go create mode 100644 vendor/knative.dev/pkg/injection/interface.go create mode 100644 vendor/knative.dev/pkg/kmeta/OWNERS rename vendor/{github.com/knative => knative.dev}/pkg/kmeta/accessor.go (100%) rename vendor/{github.com/knative => knative.dev}/pkg/kmeta/doc.go (100%) rename vendor/{github.com/knative => knative.dev}/pkg/kmeta/labels.go (100%) create mode 100644 vendor/knative.dev/pkg/kmeta/names.go rename vendor/{github.com/knative => knative.dev}/pkg/kmeta/owner_references.go (100%) create mode 100644 vendor/knative.dev/pkg/kmeta/ownerrefable_accessor.go rename vendor/{github.com/knative => knative.dev}/pkg/kmp/diff.go (62%) rename vendor/{github.com/knative => knative.dev}/pkg/kmp/doc.go (100%) create mode 100644 vendor/knative.dev/pkg/kmp/reporters.go create mode 100644 vendor/knative.dev/pkg/logging/OWNERS rename vendor/{github.com/knative => knative.dev}/pkg/logging/config.go (82%) rename vendor/{github.com/knative => knative.dev}/pkg/logging/logger.go (100%) rename vendor/{github.com/knative => knative.dev}/pkg/logging/logkey/constants.go (94%) create mode 100644 vendor/knative.dev/pkg/logging/testing/util.go rename vendor/{github.com/knative => knative.dev}/pkg/logging/zz_generated.deepcopy.go (100%) create mode 100644 vendor/knative.dev/pkg/metrics/OWNERS rename vendor/{github.com/knative => knative.dev}/pkg/metrics/config.go (89%) rename vendor/{github.com/knative => knative.dev}/pkg/metrics/doc.go (100%) rename vendor/{github.com/knative => 
knative.dev}/pkg/metrics/exporter.go (82%) rename vendor/{github.com/knative => knative.dev}/pkg/metrics/gcp_metadata.go (91%) rename vendor/{github.com/knative => knative.dev}/pkg/metrics/metricskey/constants.go (100%) rename vendor/{github.com/knative => knative.dev}/pkg/metrics/monitored_resources.go (97%) rename vendor/{github.com/knative => knative.dev}/pkg/metrics/prometheus_exporter.go (97%) rename vendor/{github.com/knative => knative.dev}/pkg/metrics/record.go (80%) rename vendor/{github.com/knative => knative.dev}/pkg/metrics/stackdriver_exporter.go (99%) create mode 100644 vendor/knative.dev/pkg/reconciler/testing/actions.go create mode 100644 vendor/knative.dev/pkg/reconciler/testing/clock.go create mode 100644 vendor/knative.dev/pkg/reconciler/testing/context.go create mode 100644 vendor/knative.dev/pkg/reconciler/testing/events.go create mode 100644 vendor/knative.dev/pkg/reconciler/testing/generate_name_reactor.go create mode 100644 vendor/knative.dev/pkg/reconciler/testing/hooks.go create mode 100644 vendor/knative.dev/pkg/reconciler/testing/reactions.go create mode 100644 vendor/knative.dev/pkg/reconciler/testing/sorter.go create mode 100644 vendor/knative.dev/pkg/reconciler/testing/stats.go create mode 100644 vendor/knative.dev/pkg/reconciler/testing/table.go create mode 100644 vendor/knative.dev/pkg/reconciler/testing/tracker.go create mode 100644 vendor/knative.dev/pkg/reconciler/testing/util.go create mode 100644 vendor/knative.dev/pkg/system/clock.go create mode 100644 vendor/knative.dev/pkg/system/names.go create mode 100644 vendor/knative.dev/pkg/system/testing/names.go create mode 100644 vendor/knative.dev/pkg/test/OWNERS rename vendor/{github.com/knative => knative.dev}/pkg/test/README.md (96%) rename vendor/{github.com/knative => knative.dev}/pkg/test/cleanup.go (96%) rename vendor/{github.com/knative => knative.dev}/pkg/test/clients.go (98%) rename vendor/{github.com/knative => knative.dev}/pkg/test/crd.go (100%) rename 
vendor/{github.com/knative => knative.dev}/pkg/test/e2e_flags.go (100%) rename vendor/{github.com/knative => knative.dev}/pkg/test/ingress/ingress.go (100%) rename vendor/{github.com/knative => knative.dev}/pkg/test/kube_checks.go (85%) rename vendor/{github.com/knative => knative.dev}/pkg/test/logging/logging.go (99%) rename vendor/{github.com/knative => knative.dev}/pkg/test/monitoring/doc.go (100%) rename vendor/{github.com/knative => knative.dev}/pkg/test/monitoring/monitoring.go (98%) rename vendor/{github.com/knative => knative.dev}/pkg/test/presubmit-tests.sh (86%) rename vendor/{github.com/knative => knative.dev}/pkg/test/request.go (87%) rename vendor/{github.com/knative => knative.dev}/pkg/test/spoof/error_checks.go (100%) rename vendor/{github.com/knative => knative.dev}/pkg/test/spoof/spoof.go (98%) rename vendor/{github.com/knative => knative.dev}/pkg/test/zipkin/doc.go (100%) rename vendor/{github.com/knative => knative.dev}/pkg/test/zipkin/util.go (98%) create mode 100644 vendor/knative.dev/pkg/tracker/doc.go create mode 100644 vendor/knative.dev/pkg/tracker/enqueue.go create mode 100644 vendor/knative.dev/pkg/tracker/interface.go diff --git a/Makefile b/Makefile index 71ab55ee4..e2118c064 100644 --- a/Makefile +++ b/Makefile @@ -66,6 +66,10 @@ man: bin/docs ## update manpages clean: ## clean build artifacts rm -fR bin +.PHONY: fmt ## formats teh god code(excludes vendors dir) +fmt: + @go fmt $(go list ./... 
| grep -v /vendor/) + .PHONY: help help: ## print this help @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z0-9_-]+:.*?## / {gsub("\\\\n",sprintf("\n%22c",""), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) diff --git a/go.mod b/go.mod index 87d0b4dd2..ff0ef7401 100644 --- a/go.mod +++ b/go.mod @@ -5,11 +5,13 @@ go 1.12 require ( cloud.google.com/go v0.37.2 // indirect contrib.go.opencensus.io/exporter/ocagent v0.2.0 // indirect + contrib.go.opencensus.io/exporter/prometheus v0.1.0 // indirect contrib.go.opencensus.io/exporter/stackdriver v0.9.1 // indirect github.com/AlecAivazis/survey/v2 v2.0.1 github.com/Azure/azure-sdk-for-go v26.1.0+incompatible // indirect github.com/Azure/go-autorest v11.6.0+incompatible // indirect github.com/Netflix/go-expect v0.0.0-20180928190340-9d1f4485533b + github.com/apache/thrift v0.12.0 // indirect github.com/aws/aws-sdk-go v1.19.11 // indirect github.com/blang/semver v3.5.1+incompatible github.com/census-instrumentation/opencensus-proto v0.1.0 // indirect @@ -19,30 +21,32 @@ require ( github.com/fatih/color v1.7.0 github.com/ghodss/yaml v1.0.0 // indirect github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef // indirect - github.com/google/go-cmp v0.2.0 + github.com/google/go-cmp v0.3.1 github.com/google/go-containerregistry v0.0.0-20190320210540-8d4083db9aa0 // indirect github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367 // indirect + github.com/google/uuid v1.1.1 // indirect github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d // indirect github.com/hako/durafmt v0.0.0-20180520121703-7b7ae1e72ead github.com/hinshun/vt10x v0.0.0-20180809195222-d55458df857c github.com/imdario/mergo v0.3.7 // indirect github.com/jonboulle/clockwork v0.1.1-0.20190114141812-62fb9bc030d1 github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3 // indirect - github.com/knative/pkg v0.0.0-20190409220258-28cfa161499b github.com/kr/pretty v0.1.0 // indirect github.com/kr/pty v1.1.8 // indirect 
github.com/markbates/inflect v1.0.4 // indirect github.com/mattbaird/jsonpatch v0.0.0-20171005235357-81af80346b1a // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 // indirect + github.com/openzipkin/zipkin-go v0.1.6 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pkg/errors v0.8.1 + github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 // indirect github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 // indirect github.com/prometheus/procfs v0.0.0-20190322151404-55ae3d9d5573 // indirect github.com/spf13/cobra v0.0.5 github.com/spf13/pflag v1.0.3 github.com/stretchr/testify v1.2.2 - github.com/tektoncd/pipeline v0.5.2 + github.com/tektoncd/pipeline v0.6.0 github.com/tektoncd/plumbing v0.0.0-20190604151109-373083123d6a go.uber.org/atomic v1.3.2 // indirect go.uber.org/multierr v1.1.0 // indirect @@ -58,6 +62,7 @@ require ( k8s.io/klog v0.2.0 // indirect k8s.io/kube-openapi v0.0.0-20171101183504-39a7bf85c140 // indirect k8s.io/kubernetes v1.13.3 // indirect + knative.dev/pkg v0.0.0-20190719141030-e4bc08cc8ded sigs.k8s.io/yaml v1.1.0 // indirect ) diff --git a/go.sum b/go.sum index 3831ccb91..1295fa96c 100644 --- a/go.sum +++ b/go.sum @@ -6,6 +6,8 @@ cloud.google.com/go v0.37.2 h1:4y4L7BdHenTfZL0HervofNTHh9Ad6mNX72cQvl+5eH0= cloud.google.com/go v0.37.2/go.mod h1:H8IAquKe2L30IxoupDgqTaQvKSwF/c8prYHynGIWQbA= contrib.go.opencensus.io/exporter/ocagent v0.2.0 h1:Q/jXnVbliDYozuWJni9452xsSUuo+y8yrioxRgofBhE= contrib.go.opencensus.io/exporter/ocagent v0.2.0/go.mod h1:0fnkYHF+ORKj7HWzOExKkUHeFX79gXSKUQbpnAM+wzo= +contrib.go.opencensus.io/exporter/prometheus v0.1.0 h1:SByaIoWwNgMdPSgl5sMqM2KDE5H/ukPWBRo314xiDvg= +contrib.go.opencensus.io/exporter/prometheus v0.1.0/go.mod h1:cGFniUXGZlKRjzOyuZJ6mgB+PgBcCIa79kEKR8YCW+A= contrib.go.opencensus.io/exporter/stackdriver v0.9.1 
h1:W6APgQ9we4BH8U8bnq/FvwLKo2WSMHuiMkkS/Slkg30= contrib.go.opencensus.io/exporter/stackdriver v0.9.1/go.mod h1:hNe5qQofPbg6bLQY5wHCvQ7o+2E5P8PkegEuQ+MyRw0= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= @@ -90,6 +92,8 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCy github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-containerregistry v0.0.0-20190320210540-8d4083db9aa0 h1:NPHi7Dw3wo3MpehiKV4YOZDKMA4RcwpRAdi77NSW/5s= github.com/google/go-containerregistry v0.0.0-20190320210540-8d4083db9aa0/go.mod h1:yZAFP63pRshzrEYLXLGPmUt0Ay+2zdjmMN1loCnRLUk= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= @@ -98,6 +102,8 @@ github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367 h1:ScAXWS+TR6MZKex+7 github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= 
github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc= @@ -186,6 +192,7 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= @@ -193,10 +200,12 @@ github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1: github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs 
v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190322151404-55ae3d9d5573 h1:gAuD3LIrjkoOOPLlhGlZWZXztrQII9a9kT6HS5jFtSY= github.com/prometheus/procfs v0.0.0-20190322151404-55ae3d9d5573/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -219,6 +228,8 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/tektoncd/pipeline v0.5.2 h1:3+OSjEamMxBM+qIvhZowE6rSLWZdtUT3jSkqIDG5MnA= github.com/tektoncd/pipeline v0.5.2/go.mod h1:IZzJdiX9EqEMuUcgdnElozdYYRh0/ZRC+NKMLj1K3Yw= +github.com/tektoncd/pipeline v0.6.0 h1:kYN3Ejm2T9GuXcIdAZc76aIUZqUqPTcfEx/7e7DgQlI= +github.com/tektoncd/pipeline v0.6.0/go.mod h1:IZzJdiX9EqEMuUcgdnElozdYYRh0/ZRC+NKMLj1K3Yw= github.com/tektoncd/plumbing v0.0.0-20190604151109-373083123d6a h1:4HMTW2uHdQIm8XabCE6P4z/ikCx9eqK8Tvo3noR5RV4= github.com/tektoncd/plumbing v0.0.0-20190604151109-373083123d6a/go.mod h1:dvZoTaPGpr3ZDqOUR1sc8VOhh/7OzYncVnbQETJTqvQ= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= @@ -229,6 +240,8 @@ go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A= go.opencensus.io v0.19.2 h1:ZZpq6xI6kv/LuE/5s5UQvBU5vMjvRnPb8PvJrIntAnc= go.opencensus.io v0.19.2/go.mod h1:NO/8qkisMZLZ1FCsKNqtJPwc8/TaclWyY0B6wcYNg9M= +go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= 
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= @@ -257,6 +270,7 @@ golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -364,5 +378,10 @@ k8s.io/kube-openapi v0.0.0-20171101183504-39a7bf85c140 h1:j1Zez+Xb4OWvCdROqeq8sP k8s.io/kube-openapi v0.0.0-20171101183504-39a7bf85c140/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kubernetes v1.13.3 h1:46t44D87wKtdKFgr/lXM60K8xPrW0wO67Woof3Vsv6E= k8s.io/kubernetes v1.13.3/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +knative.dev/pkg v0.0.0-20190719141030-e4bc08cc8ded h1:CHn7mNgIE5GHMJqfNPFLBLcZlCdaTdQmAA8Uu6XLOKc= +knative.dev/pkg v0.0.0-20190719141030-e4bc08cc8ded/go.mod h1:pgODObA1dTyhNoFxPZTTjNWfx6F0aKsKzn+vaT9XO/Q= +knative.dev/pkg v0.0.0-20190817231834-12ee58e32cc8 h1:GDSaSL05bUpC4JtShhWTXKIG/uualFfN6td0cdXaw7k= +knative.dev/pkg v0.0.0-20190817231834-12ee58e32cc8/go.mod h1:pgODObA1dTyhNoFxPZTTjNWfx6F0aKsKzn+vaT9XO/Q= +knative.dev/pkg v0.0.0-20190820015002-9cc6a645418f h1:H/VzwXgSNDRRTCpA5H/Xcc6o2IUAUpQxtpCoaO1QxF0= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git 
a/pkg/cmd/clustertask/list_test.go b/pkg/cmd/clustertask/list_test.go index f109127d2..c952bc3ce 100644 --- a/pkg/cmd/clustertask/list_test.go +++ b/pkg/cmd/clustertask/list_test.go @@ -22,7 +22,6 @@ import ( "github.com/google/go-cmp/cmp" "github.com/jonboulle/clockwork" "github.com/tektoncd/cli/pkg/test" - tu "github.com/tektoncd/cli/pkg/test" cb "github.com/tektoncd/cli/pkg/test/builder" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" pipelinetest "github.com/tektoncd/pipeline/test" @@ -30,7 +29,7 @@ import ( ) func TestClusterTaskList_Empty(t *testing.T) { - cs, _ := pipelinetest.SeedTestData(t, pipelinetest.Data{}) + cs, _ := test.SeedTestData(t, pipelinetest.Data{}) p := &test.Params{Tekton: cs.Pipeline} clustertask := Command(p) @@ -38,7 +37,7 @@ func TestClusterTaskList_Empty(t *testing.T) { if err != nil { t.Errorf("Unexpected error: %v", err) } - tu.AssertOutput(t, emptyMsg+"\n", output) + test.AssertOutput(t, emptyMsg+"\n", output) } func TestClusterTaskListOnlyClusterTasks(t *testing.T) { @@ -49,7 +48,7 @@ func TestClusterTaskListOnlyClusterTasks(t *testing.T) { tb.ClusterTask("pineapple", cb.ClusterTaskCreationTime(clock.Now().Add(-512*time.Hour))), } - cs, _ := pipelinetest.SeedTestData(t, pipelinetest.Data{ClusterTasks: clustertasks}) + cs, _ := test.SeedTestData(t, pipelinetest.Data{ClusterTasks: clustertasks}) p := &test.Params{Tekton: cs.Pipeline, Clock: clock} clustertask := Command(p) diff --git a/pkg/cmd/pipeline/describe_test.go b/pkg/cmd/pipeline/describe_test.go index a6dffdd41..cce0659e6 100644 --- a/pkg/cmd/pipeline/describe_test.go +++ b/pkg/cmd/pipeline/describe_test.go @@ -21,19 +21,18 @@ import ( "github.com/google/go-cmp/cmp" "github.com/jonboulle/clockwork" - "github.com/knative/pkg/apis" "github.com/tektoncd/cli/pkg/test" - tu "github.com/tektoncd/cli/pkg/test" cb "github.com/tektoncd/cli/pkg/test/builder" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" 
"github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/pipelinerun/resources" pipelinetest "github.com/tektoncd/pipeline/test" tb "github.com/tektoncd/pipeline/test/builder" corev1 "k8s.io/api/core/v1" + "knative.dev/pkg/apis" ) func TestPipelineDescribe_Empty(t *testing.T) { - cs, _ := pipelinetest.SeedTestData(t, pipelinetest.Data{}) + cs, _ := test.SeedTestData(t, pipelinetest.Data{}) p := &test.Params{Tekton: cs.Pipeline} pipeline := Command(p) @@ -42,13 +41,13 @@ func TestPipelineDescribe_Empty(t *testing.T) { t.Errorf("Error expected here") } expected := "pipelines.tekton.dev \"bar\" not found" - tu.AssertOutput(t, expected, err.Error()) + test.AssertOutput(t, expected, err.Error()) } func TestPipelinesDescribe_with_run(t *testing.T) { clock := clockwork.NewFakeClock() - cs, _ := pipelinetest.SeedTestData(t, pipelinetest.Data{ + cs, _ := test.SeedTestData(t, pipelinetest.Data{ Pipelines: []*v1alpha1.Pipeline{ tb.Pipeline("pipeline", "ns", // created 5 minutes back @@ -109,7 +108,7 @@ func TestPipelinesDescribe_with_run(t *testing.T) { func TestPipelinesDescribe_with_task_run(t *testing.T) { clock := clockwork.NewFakeClock() - cs, _ := pipelinetest.SeedTestData(t, pipelinetest.Data{ + cs, _ := test.SeedTestData(t, pipelinetest.Data{ Pipelines: []*v1alpha1.Pipeline{ tb.Pipeline("pipeline", "ns", // created 5 minutes back @@ -175,7 +174,7 @@ func TestPipelinesDescribe_with_task_run(t *testing.T) { func TestPipelinesDescribe_with_resource_task_run(t *testing.T) { clock := clockwork.NewFakeClock() - cs, _ := pipelinetest.SeedTestData(t, pipelinetest.Data{ + cs, _ := test.SeedTestData(t, pipelinetest.Data{ Pipelines: []*v1alpha1.Pipeline{ tb.Pipeline("pipeline", "ns", // created 5 minutes back diff --git a/pkg/cmd/pipeline/list_test.go b/pkg/cmd/pipeline/list_test.go index fb6906e59..91dd7bcd7 100644 --- a/pkg/cmd/pipeline/list_test.go +++ b/pkg/cmd/pipeline/list_test.go @@ -7,10 +7,9 @@ import ( "github.com/google/go-cmp/cmp" "github.com/jonboulle/clockwork" - 
"github.com/knative/pkg/apis" "github.com/tektoncd/cli/pkg/test" - tu "github.com/tektoncd/cli/pkg/test" cb "github.com/tektoncd/cli/pkg/test/builder" + "knative.dev/pkg/apis" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" "github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/pipelinerun/resources" @@ -21,7 +20,7 @@ import ( func TestPipelinesList_empty(t *testing.T) { - cs, _ := pipelinetest.SeedTestData(t, pipelinetest.Data{}) + cs, _ := test.SeedTestData(t, pipelinetest.Data{}) p := &test.Params{Tekton: cs.Pipeline} pipeline := Command(p) @@ -29,7 +28,7 @@ func TestPipelinesList_empty(t *testing.T) { if err != nil { t.Errorf("Unexpected error: %v", err) } - tu.AssertOutput(t, "No pipelines\n", output) + test.AssertOutput(t, "No pipelines\n", output) } func TestPipelineList_only_pipelines(t *testing.T) { @@ -66,7 +65,7 @@ func TestPipelineList_only_pipelines(t *testing.T) { func TestPipelinesList_with_single_run(t *testing.T) { clock := clockwork.NewFakeClock() - cs, _ := pipelinetest.SeedTestData(t, pipelinetest.Data{ + cs, _ := test.SeedTestData(t, pipelinetest.Data{ Pipelines: []*v1alpha1.Pipeline{ tb.Pipeline("pipeline", "ns", // created 5 minutes back @@ -142,7 +141,7 @@ func TestPipelinesList_latest_run(t *testing.T) { secondRunCompleted = secondRunStarted.Add(runDuration) // takes less thus completes ) - cs, _ := pipelinetest.SeedTestData(t, pipelinetest.Data{ + cs, _ := test.SeedTestData(t, pipelinetest.Data{ Pipelines: []*v1alpha1.Pipeline{ tb.Pipeline("pipeline", "ns", // created 5 minutes back @@ -217,5 +216,5 @@ func seedPipelines(t *testing.T, clock clockwork.Clock, ps []pipelineDetails, ns ) } - return pipelinetest.SeedTestData(t, pipelinetest.Data{Pipelines: pipelines}) + return test.SeedTestData(t, pipelinetest.Data{Pipelines: pipelines}) } diff --git a/pkg/cmd/pipeline/logs_test.go b/pkg/cmd/pipeline/logs_test.go index 3c8533745..cc888bc89 100644 --- a/pkg/cmd/pipeline/logs_test.go +++ b/pkg/cmd/pipeline/logs_test.go @@ -22,17 +22,16 
@@ import ( "github.com/AlecAivazis/survey/v2/terminal" "github.com/Netflix/go-expect" - tu "github.com/tektoncd/cli/pkg/test" + "github.com/tektoncd/cli/pkg/test" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - "github.com/tektoncd/pipeline/test" tb "github.com/tektoncd/pipeline/test/builder" "github.com/jonboulle/clockwork" - "github.com/knative/pkg/apis" cb "github.com/tektoncd/cli/pkg/test/builder" "github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/pipelinerun/resources" pipelinetest "github.com/tektoncd/pipeline/test" corev1 "k8s.io/api/core/v1" + "knative.dev/pkg/apis" ) func init() { @@ -48,70 +47,70 @@ var ( ) func TestLogs_no_pipeline(t *testing.T) { - cs, _ := pipelinetest.SeedTestData(t, pipelinetest.Data{ + cs, _ := test.SeedTestData(t, pipelinetest.Data{ Pipelines: []*v1alpha1.Pipeline{ tb.Pipeline(pipelineName, ns), }}) - p := &tu.Params{Tekton: cs.Pipeline, Kube: cs.Kube} + p := &test.Params{Tekton: cs.Pipeline, Kube: cs.Kube} c := Command(p) - out, err := tu.ExecuteCommand(c, "logs", "-n", "ns") + out, err := test.ExecuteCommand(c, "logs", "-n", "ns") if err != nil { t.Errorf("Unexpected error: %v", err) } expected := "No pipelines found in namespace: ns\n" - tu.AssertOutput(t, expected, out) + test.AssertOutput(t, expected, out) } func TestLogs_no_runs(t *testing.T) { - cs, _ := pipelinetest.SeedTestData(t, pipelinetest.Data{ + cs, _ := test.SeedTestData(t, pipelinetest.Data{ Pipelines: []*v1alpha1.Pipeline{ tb.Pipeline(pipelineName, ns), }}) - p := &tu.Params{Tekton: cs.Pipeline, Kube: cs.Kube} + p := &test.Params{Tekton: cs.Pipeline, Kube: cs.Kube} c := Command(p) - out, err := tu.ExecuteCommand(c, "logs", pipelineName, "-n", ns) + out, err := test.ExecuteCommand(c, "logs", pipelineName, "-n", ns) if err != nil { t.Errorf("Unexpected error: %v", err) } expected := "No pipelineruns found for pipeline: output-pipeline\n" - tu.AssertOutput(t, expected, out) + test.AssertOutput(t, expected, out) } func TestLogs_wrong_pipeline(t 
*testing.T) { - cs, _ := pipelinetest.SeedTestData(t, pipelinetest.Data{ + cs, _ := test.SeedTestData(t, pipelinetest.Data{ Pipelines: []*v1alpha1.Pipeline{ tb.Pipeline(pipelineName, ns), }}) - p := &tu.Params{Tekton: cs.Pipeline, Kube: cs.Kube} + p := &test.Params{Tekton: cs.Pipeline, Kube: cs.Kube} c := Command(p) - _, err := tu.ExecuteCommand(c, "logs", "pipeline", "-n", ns) + _, err := test.ExecuteCommand(c, "logs", "pipeline", "-n", ns) expected := "pipelines.tekton.dev \"pipeline\" not found" - tu.AssertOutput(t, expected, err.Error()) + test.AssertOutput(t, expected, err.Error()) } func TestLogs_wrong_run(t *testing.T) { - cs, _ := pipelinetest.SeedTestData(t, pipelinetest.Data{ + cs, _ := test.SeedTestData(t, pipelinetest.Data{ Pipelines: []*v1alpha1.Pipeline{ tb.Pipeline(pipelineName, ns), }}) - p := &tu.Params{Tekton: cs.Pipeline, Kube: cs.Kube} + p := &test.Params{Tekton: cs.Pipeline, Kube: cs.Kube} c := Command(p) - _, err := tu.ExecuteCommand(c, "logs", "pipeline", "pipelinerun", "-n", "ns") + _, err := test.ExecuteCommand(c, "logs", "pipeline", "pipelinerun", "-n", "ns") expected := "pipelineruns.tekton.dev \"pipelinerun\" not found" - tu.AssertOutput(t, expected, err.Error()) + test.AssertOutput(t, expected, err.Error()) } func TestLogs_interactive_get_all_inputs(t *testing.T) { clock := clockwork.NewFakeClock() - cs, _ := pipelinetest.SeedTestData(t, pipelinetest.Data{ + cs, _ := test.SeedTestData(t, pipelinetest.Data{ Pipelines: []*v1alpha1.Pipeline{ tb.Pipeline(pipelineName, ns, // created 15 minutes back @@ -222,7 +221,7 @@ func TestLogs_interactive_get_all_inputs(t *testing.T) { func TestLogs_interactive_ask_runs(t *testing.T) { clock := clockwork.NewFakeClock() - cs, _ := pipelinetest.SeedTestData(t, pipelinetest.Data{ + cs, _ := test.SeedTestData(t, pipelinetest.Data{ Pipelines: []*v1alpha1.Pipeline{ tb.Pipeline(pipelineName, ns, // created 15 minutes back @@ -306,8 +305,8 @@ func TestLogs_interactive_ask_runs(t *testing.T) { } } -func 
logOpts(name string, ns string, cs test.Clients) *logOptions { - p := tu.Params{ +func logOpts(name string, ns string, cs pipelinetest.Clients) *logOptions { + p := test.Params{ Kube: cs.Kube, Tekton: cs.Pipeline, } diff --git a/pkg/cmd/pipeline/start.go b/pkg/cmd/pipeline/start.go index baa54e520..e2d231755 100644 --- a/pkg/cmd/pipeline/start.go +++ b/pkg/cmd/pipeline/start.go @@ -292,8 +292,11 @@ func parseParam(p []string) (map[string]v1alpha1.Param, error) { return nil, errors.New(errMsg) } params[r[0]] = v1alpha1.Param{ - Name: r[0], - Value: r[1], + Name: r[0], + Value: v1alpha1.ArrayOrString{ + Type: v1alpha1.ParamTypeString, + StringVal: r[1], + }, } } return params, nil diff --git a/pkg/cmd/pipeline/start_test.go b/pkg/cmd/pipeline/start_test.go index ddd2352a1..312ff3db7 100644 --- a/pkg/cmd/pipeline/start_test.go +++ b/pkg/cmd/pipeline/start_test.go @@ -22,14 +22,13 @@ import ( "time" "github.com/jonboulle/clockwork" - "github.com/knative/pkg/apis" "github.com/tektoncd/cli/pkg/cli" - tu "github.com/tektoncd/cli/pkg/test" + "github.com/tektoncd/cli/pkg/test" cb "github.com/tektoncd/cli/pkg/test/builder" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" fakepipelineclientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake" "github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/pipelinerun/resources" - "github.com/tektoncd/pipeline/test" + pipelinetest "github.com/tektoncd/pipeline/test" tb "github.com/tektoncd/pipeline/test/builder" corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -40,6 +39,7 @@ import ( "k8s.io/apimachinery/pkg/watch" fakekubeclientset "k8s.io/client-go/kubernetes/fake" k8stest "k8s.io/client-go/testing" + "knative.dev/pkg/apis" ) func newPipelineClient(objs ...runtime.Object) *fakepipelineclientset.Clientset { @@ -83,9 +83,9 @@ func newPipelineClient(objs ...runtime.Object) *fakepipelineclientset.Clientset } func Test_start_has_pipeline_arg(t *testing.T) { - c := Command(&tu.Params{}) + c 
:= Command(&test.Params{}) - _, err := tu.ExecuteCommand(c, "start", "-n", "ns") + _, err := test.ExecuteCommand(c, "start", "-n", "ns") if err == nil { t.Error("Expecting an error but it's empty") @@ -97,8 +97,8 @@ func Test_start_pipeline_not_found(t *testing.T) { tb.Pipeline("test-pipeline", "foo", tb.PipelineSpec( tb.PipelineDeclaredResource("git-repo", "git"), - tb.PipelineParam("pipeline-param", tb.PipelineParamDefault("somethingdifferent")), - tb.PipelineParam("rev-param", tb.PipelineParamDefault("revision")), + tb.PipelineParamSpec("pipeline-param", v1alpha1.ParamTypeString, tb.ParamSpecDefault("somethingdifferent")), + tb.PipelineParamSpec("rev-param", v1alpha1.ParamTypeString, tb.ParamSpecDefault("revision")), tb.PipelineTask("unit-test-1", "unit-test-task", tb.PipelineTaskInputResource("workspace", "git-repo"), tb.PipelineTaskOutputResource("image-to-use", "best-image"), @@ -108,13 +108,13 @@ func Test_start_pipeline_not_found(t *testing.T) { ), } - cs, _ := test.SeedTestData(t, test.Data{Pipelines: ps}) - p := &tu.Params{Tekton: cs.Pipeline, Kube: cs.Kube} + cs, _ := test.SeedTestData(t, pipelinetest.Data{Pipelines: ps}) + p := &test.Params{Tekton: cs.Pipeline, Kube: cs.Kube} pipeline := Command(p) - got, _ := tu.ExecuteCommand(pipeline, "start", "test-pipeline-2", "-n", "ns") + got, _ := test.ExecuteCommand(pipeline, "start", "test-pipeline-2", "-n", "ns") expected := "Error: " + errInvalidPipeline.Error() + "\n" - tu.AssertOutput(t, expected, got) + test.AssertOutput(t, expected, got) } func Test_start_pipeline(t *testing.T) { @@ -125,8 +125,8 @@ func Test_start_pipeline(t *testing.T) { tb.Pipeline(pipelineName, "ns", tb.PipelineSpec( tb.PipelineDeclaredResource("git-repo", "git"), - tb.PipelineParam("pipeline-param", tb.PipelineParamDefault("somethingdifferent")), - tb.PipelineParam("rev-param", tb.PipelineParamDefault("revision")), + tb.PipelineParamSpec("pipeline-param", v1alpha1.ParamTypeString, tb.ParamSpecDefault("somethingdifferent")), + 
tb.PipelineParamSpec("rev-param", v1alpha1.ParamTypeString, tb.ParamSpecDefault("revision")), tb.PipelineTask("unit-test-1", "unit-test-task", tb.PipelineTaskInputResource("workspace", "git-repo"), tb.PipelineTaskOutputResource("image-to-use", "best-image"), @@ -136,11 +136,11 @@ func Test_start_pipeline(t *testing.T) { ), // pipeline } - cs, _ := test.SeedTestData(t, test.Data{Pipelines: ps}) - p := &tu.Params{Tekton: cs.Pipeline, Kube: cs.Kube} + cs, _ := test.SeedTestData(t, pipelinetest.Data{Pipelines: ps}) + p := &test.Params{Tekton: cs.Pipeline, Kube: cs.Kube} pipeline := Command(p) - got, _ := tu.ExecuteCommand(pipeline, "start", pipelineName, + got, _ := test.ExecuteCommand(pipeline, "start", pipelineName, "-r=source=scaffold-git", "-p=key1=value1", "-s=svc1", @@ -148,7 +148,7 @@ func Test_start_pipeline(t *testing.T) { "-n", "ns") expected := "Pipelinerun started: \n" - tu.AssertOutput(t, expected, got) + test.AssertOutput(t, expected, got) pr, err := cs.Pipeline.TektonV1alpha1().PipelineRuns("ns").List(v1.ListOptions{}) if err != nil { @@ -169,8 +169,8 @@ func Test_start_pipeline_last(t *testing.T) { tb.PipelineSpec( tb.PipelineDeclaredResource("git-repo", "git"), tb.PipelineDeclaredResource("build-image", "image"), - tb.PipelineParam("pipeline-param-1", tb.PipelineParamDefault("somethingdifferent-1")), - tb.PipelineParam("rev-param", tb.PipelineParamDefault("revision")), + tb.PipelineParamSpec("pipeline-param-1", v1alpha1.ParamTypeString, tb.ParamSpecDefault("somethingdifferent-1")), + tb.PipelineParamSpec("rev-param", v1alpha1.ParamTypeString, tb.ParamSpecDefault("revision")), tb.PipelineTask("unit-test-1", "unit-test-task", tb.PipelineTaskInputResource("workspace", "git-repo"), tb.PipelineTaskOutputResource("image-to-use", "best-image"), @@ -196,20 +196,20 @@ func Test_start_pipeline_last(t *testing.T) { objs := []runtime.Object{ps[0], prs[0]} pClient := newPipelineClient(objs...) 
- cs := test.Clients{ + cs := pipelinetest.Clients{ Pipeline: pClient, Kube: fakekubeclientset.NewSimpleClientset(), } - p := &tu.Params{Tekton: cs.Pipeline, Kube: cs.Kube} + p := &test.Params{Tekton: cs.Pipeline, Kube: cs.Kube} pipeline := Command(p) - got, _ := tu.ExecuteCommand(pipeline, "start", pipelineName, + got, _ := test.ExecuteCommand(pipeline, "start", pipelineName, "--last", "-nns") expected := "Pipelinerun started: random\n" - tu.AssertOutput(t, expected, got) + test.AssertOutput(t, expected, got) pr, err := cs.Pipeline.TektonV1alpha1().PipelineRuns(p.Namespace()).Get("random", v1.GetOptions{}) @@ -219,16 +219,16 @@ func Test_start_pipeline_last(t *testing.T) { for _, v := range pr.Spec.Resources { if v.Name == "git-repo" { - tu.AssertOutput(t, "some-repo", v.ResourceRef.Name) + test.AssertOutput(t, "some-repo", v.ResourceRef.Name) } } for _, v := range pr.Spec.Params { if v.Name == "rev-param" { - tu.AssertOutput(t, "revision1", v.Value) + test.AssertOutput(t, v1alpha1.ArrayOrString{Type: v1alpha1.ParamTypeString, StringVal: "revision1"}, v.Value) } } - tu.AssertOutput(t, "test-sa", pr.Spec.ServiceAccount) + test.AssertOutput(t, "test-sa", pr.Spec.ServiceAccount) } func Test_start_pipeline_last_merge(t *testing.T) { @@ -240,8 +240,8 @@ func Test_start_pipeline_last_merge(t *testing.T) { tb.PipelineSpec( tb.PipelineDeclaredResource("git-repo", "git"), tb.PipelineDeclaredResource("build-image", "image"), - tb.PipelineParam("pipeline-param-1", tb.PipelineParamDefault("somethingdifferent-1")), - tb.PipelineParam("rev-param", tb.PipelineParamDefault("revision")), + tb.PipelineParamSpec("pipeline-param-1", v1alpha1.ParamTypeString, tb.ParamSpecDefault("somethingdifferent-1")), + tb.PipelineParamSpec("rev-param", v1alpha1.ParamTypeString, tb.ParamSpecDefault("revision")), tb.PipelineTask("unit-test-1", "unit-test-task", tb.PipelineTaskInputResource("workspace", "git-repo"), tb.PipelineTaskOutputResource("image-to-use", "best-image"), @@ -269,15 +269,15 @@ func 
Test_start_pipeline_last_merge(t *testing.T) { objs := []runtime.Object{ps[0], prs[0]} pClient := newPipelineClient(objs...) - cs := test.Clients{ + cs := pipelinetest.Clients{ Pipeline: pClient, Kube: fakekubeclientset.NewSimpleClientset(), } - p := &tu.Params{Tekton: cs.Pipeline, Kube: cs.Kube} + p := &test.Params{Tekton: cs.Pipeline, Kube: cs.Kube} pipeline := Command(p) - got, _ := tu.ExecuteCommand(pipeline, "start", pipelineName, + got, _ := test.ExecuteCommand(pipeline, "start", pipelineName, "--last", "-r=git-repo=scaffold-git", "-p=rev-param=revision2", @@ -287,7 +287,7 @@ func Test_start_pipeline_last_merge(t *testing.T) { "-n=ns") expected := "Pipelinerun started: random\n" - tu.AssertOutput(t, expected, got) + test.AssertOutput(t, expected, got) pr, err := cs.Pipeline.TektonV1alpha1().PipelineRuns(p.Namespace()).Get("random", v1.GetOptions{}) @@ -297,23 +297,23 @@ func Test_start_pipeline_last_merge(t *testing.T) { for _, v := range pr.Spec.Resources { if v.Name == "git-repo" { - tu.AssertOutput(t, "scaffold-git", v.ResourceRef.Name) + test.AssertOutput(t, "scaffold-git", v.ResourceRef.Name) } } for _, v := range pr.Spec.Params { if v.Name == "rev-param" { - tu.AssertOutput(t, "revision2", v.Value) + test.AssertOutput(t, v1alpha1.ArrayOrString{Type: v1alpha1.ParamTypeString, StringVal: "revision2"}, v.Value) } } for _, v := range pr.Spec.ServiceAccounts { if v.TaskName == "task3" { - tu.AssertOutput(t, "task3svc3", v.ServiceAccount) + test.AssertOutput(t, "task3svc3", v.ServiceAccount) } } - tu.AssertOutput(t, "svc1", pr.Spec.ServiceAccount) + test.AssertOutput(t, "svc1", pr.Spec.ServiceAccount) } func Test_start_pipeline_last_no_pipelineruns(t *testing.T) { @@ -325,8 +325,8 @@ func Test_start_pipeline_last_no_pipelineruns(t *testing.T) { tb.PipelineSpec( tb.PipelineDeclaredResource("git-repo", "git"), tb.PipelineDeclaredResource("build-image", "image"), - tb.PipelineParam("pipeline-param-1", tb.PipelineParamDefault("somethingdifferent-1")), - 
tb.PipelineParam("rev-param", tb.PipelineParamDefault("revision")), + tb.PipelineParamSpec("pipeline-param-1", v1alpha1.ParamTypeString, tb.ParamSpecDefault("somethingdifferent-1")), + tb.PipelineParamSpec("rev-param", v1alpha1.ParamTypeString, tb.ParamSpecDefault("revision")), tb.PipelineTask("unit-test-1", "unit-test-task", tb.PipelineTaskInputResource("workspace", "git-repo"), tb.PipelineTaskOutputResource("image-to-use", "best-image"), @@ -339,20 +339,20 @@ func Test_start_pipeline_last_no_pipelineruns(t *testing.T) { objs := []runtime.Object{ps[0]} pClient := newPipelineClient(objs...) - cs := test.Clients{ + cs := pipelinetest.Clients{ Pipeline: pClient, Kube: fakekubeclientset.NewSimpleClientset(), } - p := &tu.Params{Tekton: cs.Pipeline, Kube: cs.Kube} + p := &test.Params{Tekton: cs.Pipeline, Kube: cs.Kube} pipeline := Command(p) - got, _ := tu.ExecuteCommand(pipeline, "start", pipelineName, + got, _ := test.ExecuteCommand(pipeline, "start", pipelineName, "--last", "-nns") expected := "Error: No pipelineruns found in namespace: ns\n" - tu.AssertOutput(t, expected, got) + test.AssertOutput(t, expected, got) } func Test_start_pipeline_last_list_err(t *testing.T) { @@ -364,8 +364,8 @@ func Test_start_pipeline_last_list_err(t *testing.T) { tb.PipelineSpec( tb.PipelineDeclaredResource("git-repo", "git"), tb.PipelineDeclaredResource("build-image", "image"), - tb.PipelineParam("pipeline-param-1", tb.PipelineParamDefault("somethingdifferent-1")), - tb.PipelineParam("rev-param", tb.PipelineParamDefault("revision")), + tb.PipelineParamSpec("pipeline-param-1", v1alpha1.ParamTypeString, tb.ParamSpecDefault("somethingdifferent-1")), + tb.PipelineParamSpec("rev-param", v1alpha1.ParamTypeString, tb.ParamSpecDefault("revision")), tb.PipelineTask("unit-test-1", "unit-test-task", tb.PipelineTaskInputResource("workspace", "git-repo"), tb.PipelineTaskOutputResource("image-to-use", "best-image"), @@ -375,20 +375,20 @@ func Test_start_pipeline_last_list_err(t *testing.T) { ), // 
pipeline } - cs, _ := test.SeedTestData(t, test.Data{Pipelines: ps}) - p := &tu.Params{Tekton: cs.Pipeline, Kube: cs.Kube} + cs, _ := test.SeedTestData(t, pipelinetest.Data{Pipelines: ps}) + p := &test.Params{Tekton: cs.Pipeline, Kube: cs.Kube} cs.Pipeline.PrependReactor("list", "pipelineruns", func(action k8stest.Action) (bool, runtime.Object, error) { return true, nil, errors.New("test generated error") }) pipeline := Command(p) - got, _ := tu.ExecuteCommand(pipeline, "start", pipelineName, + got, _ := test.ExecuteCommand(pipeline, "start", pipelineName, "--last", "-nns") expected := "Error: test generated error\n" - tu.AssertOutput(t, expected, got) + test.AssertOutput(t, expected, got) } func Test_start_pipeline_client_error(t *testing.T) { @@ -399,8 +399,8 @@ func Test_start_pipeline_client_error(t *testing.T) { tb.Pipeline(pipelineName, "namespace", tb.PipelineSpec( tb.PipelineDeclaredResource("git-repo", "git"), - tb.PipelineParam("pipeline-param", tb.PipelineParamDefault("somethingdifferent")), - tb.PipelineParam("rev-param", tb.PipelineParamDefault("revision")), + tb.PipelineParamSpec("pipeline-param", v1alpha1.ParamTypeString, tb.ParamSpecDefault("somethingdifferent")), + tb.PipelineParamSpec("rev-param", v1alpha1.ParamTypeString, tb.ParamSpecDefault("revision")), tb.PipelineTask("unit-test-1", "unit-test-task", tb.PipelineTaskInputResource("workspace", "git-repo"), tb.PipelineTaskOutputResource("image-to-use", "best-image"), @@ -410,22 +410,22 @@ func Test_start_pipeline_client_error(t *testing.T) { ), } - cs, _ := test.SeedTestData(t, test.Data{Pipelines: ps}) + cs, _ := test.SeedTestData(t, pipelinetest.Data{Pipelines: ps}) cs.Pipeline.PrependReactor("create", "*", func(_ k8stest.Action) (bool, runtime.Object, error) { return true, nil, fmt.Errorf("mock error") }) - p := &tu.Params{Tekton: cs.Pipeline, Kube: cs.Kube} + p := &test.Params{Tekton: cs.Pipeline, Kube: cs.Kube} pipeline := Command(p) - got, _ := tu.ExecuteCommand(pipeline, "start", 
pipelineName, + got, _ := test.ExecuteCommand(pipeline, "start", pipelineName, "-r=source=scaffold-git", "-p=key1=value1", "-s=svc1", "-n=namespace") expected := "Error: mock error\n" - tu.AssertOutput(t, expected, got) + test.AssertOutput(t, expected, got) } func Test_start_pipeline_resource_error(t *testing.T) { @@ -436,8 +436,8 @@ func Test_start_pipeline_resource_error(t *testing.T) { tb.Pipeline(pipelineName, "namespace", tb.PipelineSpec( tb.PipelineDeclaredResource("git-repo", "git"), - tb.PipelineParam("pipeline-param", tb.PipelineParamDefault("somethingdifferent")), - tb.PipelineParam("rev-param", tb.PipelineParamDefault("revision")), + tb.PipelineParamSpec("pipeline-param", v1alpha1.ParamTypeString, tb.ParamSpecDefault("somethingdifferent")), + tb.PipelineParamSpec("rev-param", v1alpha1.ParamTypeString, tb.ParamSpecDefault("revision")), tb.PipelineTask("unit-test-1", "unit-test-task", tb.PipelineTaskInputResource("workspace", "git-repo"), tb.PipelineTaskOutputResource("image-to-use", "best-image"), @@ -447,11 +447,11 @@ func Test_start_pipeline_resource_error(t *testing.T) { ), } - cs, _ := test.SeedTestData(t, test.Data{Pipelines: ps}) + cs, _ := test.SeedTestData(t, pipelinetest.Data{Pipelines: ps}) - p := &tu.Params{Tekton: cs.Pipeline, Kube: cs.Kube} + p := &test.Params{Tekton: cs.Pipeline, Kube: cs.Kube} pipeline := Command(p) - got, _ := tu.ExecuteCommand(pipeline, "start", pipelineName, + got, _ := test.ExecuteCommand(pipeline, "start", pipelineName, "-r scaffold-git", "-p=key1=value1", "-s=svc1", @@ -459,7 +459,7 @@ func Test_start_pipeline_resource_error(t *testing.T) { expected := "Error: invalid resource parameter: scaffold-git\nPlease pass resource as -r ResourceName=ResourceRef\n" - tu.AssertOutput(t, expected, got) + test.AssertOutput(t, expected, got) } func Test_start_pipeline_param_error(t *testing.T) { @@ -470,8 +470,8 @@ func Test_start_pipeline_param_error(t *testing.T) { tb.Pipeline(pipelineName, "namespace", tb.PipelineSpec( 
tb.PipelineDeclaredResource("git-repo", "git"), - tb.PipelineParam("pipeline-param", tb.PipelineParamDefault("somethingdifferent")), - tb.PipelineParam("rev-param", tb.PipelineParamDefault("revision")), + tb.PipelineParamSpec("pipeline-param", v1alpha1.ParamTypeString, tb.ParamSpecDefault("somethingdifferent")), + tb.PipelineParamSpec("rev-param", v1alpha1.ParamTypeString, tb.ParamSpecDefault("revision")), tb.PipelineTask("unit-test-1", "unit-test-task", tb.PipelineTaskInputResource("workspace", "git-repo"), tb.PipelineTaskOutputResource("image-to-use", "best-image"), @@ -481,12 +481,12 @@ func Test_start_pipeline_param_error(t *testing.T) { ), } - cs, _ := test.SeedTestData(t, test.Data{Pipelines: ps}) + cs, _ := test.SeedTestData(t, pipelinetest.Data{Pipelines: ps}) - p := &tu.Params{Tekton: cs.Pipeline, Kube: cs.Kube} + p := &test.Params{Tekton: cs.Pipeline, Kube: cs.Kube} pipeline := Command(p) - got, _ := tu.ExecuteCommand(pipeline, "start", pipelineName, + got, _ := test.ExecuteCommand(pipeline, "start", pipelineName, "-r=source=scaffold-git", "-p value1", "-s=svc1", @@ -494,7 +494,7 @@ func Test_start_pipeline_param_error(t *testing.T) { expected := "Error: invalid param parameter: value1\nPlease pass param as -p ParamName=ParamValue\n" - tu.AssertOutput(t, expected, got) + test.AssertOutput(t, expected, got) } func Test_start_pipeline_task_svc_error(t *testing.T) { @@ -505,7 +505,7 @@ func Test_start_pipeline_task_svc_error(t *testing.T) { tb.Pipeline(pipelineName, "foo", tb.PipelineSpec( tb.PipelineDeclaredResource("git-repo", "git"), - tb.PipelineParam("pipeline-param", tb.PipelineParamDefault("somethingdifferent")), + tb.PipelineParamSpec("pipeline-param", v1alpha1.ParamTypeString, tb.ParamSpecDefault("somethingdifferent")), tb.PipelineTask("unit-test-1", "unit-test-task", tb.PipelineTaskInputResource("workspace", "git-repo"), tb.PipelineTaskOutputResource("image-to-use", "best-image"), @@ -515,12 +515,12 @@ func Test_start_pipeline_task_svc_error(t 
*testing.T) { ), } - cs, _ := test.SeedTestData(t, test.Data{Pipelines: ps}) + cs, _ := test.SeedTestData(t, pipelinetest.Data{Pipelines: ps}) - p := &tu.Params{Tekton: cs.Pipeline, Kube: cs.Kube} + p := &test.Params{Tekton: cs.Pipeline, Kube: cs.Kube} pipeline := Command(p) - got, _ := tu.ExecuteCommand(pipeline, "start", pipelineName, + got, _ := test.ExecuteCommand(pipeline, "start", pipelineName, "--task-serviceaccount=task3svc3", "-n=foo") @@ -528,7 +528,7 @@ func Test_start_pipeline_task_svc_error(t *testing.T) { "Please pass task service accounts as --task-serviceaccount" + " TaskName=ServiceAccount\n" - tu.AssertOutput(t, expected, got) + test.AssertOutput(t, expected, got) } func Test_parseRes(t *testing.T) { @@ -594,8 +594,16 @@ func Test_parseParam(t *testing.T) { p: []string{"key1=value1", "key2=value2"}, }, want: map[string]v1alpha1.Param{ - "key1": {Name: "key1", Value: "value1"}, - "key2": {Name: "key2", Value: "value2"}, + "key1": {Name: "key1", Value: v1alpha1.ArrayOrString{ + Type: v1alpha1.ParamTypeString, + StringVal: "value1", + }, + }, + "key2": {Name: "key2", Value: v1alpha1.ArrayOrString{ + Type: v1alpha1.ParamTypeString, + StringVal: "value2", + }, + }, }, wantErr: false, }, { @@ -715,11 +723,11 @@ func Test_lastPipelineRun(t *testing.T) { name: "lastPipelineRun Test No Err", args: args{ pipeline: "test", - p: func() *tu.Params { + p: func() *test.Params { clock.Advance(time.Duration(60) * time.Minute) - cs, _ := test.SeedTestData(t, test.Data{PipelineRuns: prs}) - p := &tu.Params{Tekton: cs.Pipeline, Clock: clock} + cs, _ := test.SeedTestData(t, pipelinetest.Data{PipelineRuns: prs}) + p := &test.Params{Tekton: cs.Pipeline, Clock: clock} p.SetNamespace("namespace") return p @@ -732,9 +740,9 @@ func Test_lastPipelineRun(t *testing.T) { name: "lastPipelineRun Test Err", args: args{ pipeline: "test", - p: func() *tu.Params { - cs, _ := test.SeedTestData(t, test.Data{}) - p := &tu.Params{Tekton: cs.Pipeline} + p: func() *test.Params { + cs, _ 
:= test.SeedTestData(t, pipelinetest.Data{}) + p := &test.Params{Tekton: cs.Pipeline} p.SetNamespace("namespace") return p @@ -752,7 +760,7 @@ func Test_lastPipelineRun(t *testing.T) { t.Errorf("lastPipelineRun() error = %v, wantErr %v", err, tt.wantErr) return } else if err == nil { - tu.AssertOutput(t, tt.want, got.Name) + test.AssertOutput(t, tt.want, got.Name) } }) } diff --git a/pkg/cmd/pipelineresource/describe_test.go b/pkg/cmd/pipelineresource/describe_test.go index b1e0b3dad..3902e5eda 100644 --- a/pkg/cmd/pipelineresource/describe_test.go +++ b/pkg/cmd/pipelineresource/describe_test.go @@ -19,14 +19,13 @@ import ( "testing" "github.com/tektoncd/cli/pkg/test" - tu "github.com/tektoncd/cli/pkg/test" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" pipelinetest "github.com/tektoncd/pipeline/test" tb "github.com/tektoncd/pipeline/test/builder" ) func TestPipelineResourceDescribe_Empty(t *testing.T) { - cs, _ := pipelinetest.SeedTestData(t, pipelinetest.Data{}) + cs, _ := test.SeedTestData(t, pipelinetest.Data{}) p := &test.Params{Tekton: cs.Pipeline} res := Command(p) @@ -35,7 +34,7 @@ func TestPipelineResourceDescribe_Empty(t *testing.T) { t.Errorf("Error expected here") } expected := "Failed to find pipelineresource \"bar\"" - tu.AssertOutput(t, expected, err.Error()) + test.AssertOutput(t, expected, err.Error()) } func TestPipelineResourceDescribe_WithParams(t *testing.T) { @@ -47,7 +46,7 @@ func TestPipelineResourceDescribe_WithParams(t *testing.T) { ), } - cs, _ := pipelinetest.SeedTestData(t, pipelinetest.Data{PipelineResources: pres}) + cs, _ := test.SeedTestData(t, pipelinetest.Data{PipelineResources: pres}) p := &test.Params{Tekton: cs.Pipeline} pipelineresource := Command(p) out, _ := test.ExecuteCommand(pipelineresource, "desc", "test-1", "-n", "test-ns-1") @@ -64,7 +63,7 @@ func TestPipelineResourceDescribe_WithParams(t *testing.T) { "No secret params", "", } - tu.AssertOutput(t, strings.Join(expected, "\n"), out) + test.AssertOutput(t, 
strings.Join(expected, "\n"), out) } func TestPipelineResourceDescribe_WithSecretParams(t *testing.T) { @@ -78,7 +77,7 @@ func TestPipelineResourceDescribe_WithSecretParams(t *testing.T) { ), } - cs, _ := pipelinetest.SeedTestData(t, pipelinetest.Data{PipelineResources: pres}) + cs, _ := test.SeedTestData(t, pipelinetest.Data{PipelineResources: pres}) p := &test.Params{Tekton: cs.Pipeline} pipelineresource := Command(p) out, _ := test.ExecuteCommand(pipelineresource, "desc", "test-1", "-n", "test-ns-1") @@ -97,5 +96,5 @@ func TestPipelineResourceDescribe_WithSecretParams(t *testing.T) { "githubToken github-secrets", "", } - tu.AssertOutput(t, strings.Join(expected, "\n"), out) + test.AssertOutput(t, strings.Join(expected, "\n"), out) } diff --git a/pkg/cmd/pipelineresource/list_test.go b/pkg/cmd/pipelineresource/list_test.go index 9ab215bcf..b50e673d4 100644 --- a/pkg/cmd/pipelineresource/list_test.go +++ b/pkg/cmd/pipelineresource/list_test.go @@ -20,7 +20,6 @@ import ( "github.com/spf13/cobra" "github.com/tektoncd/cli/pkg/test" - tu "github.com/tektoncd/cli/pkg/test" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" pipelinetest "github.com/tektoncd/pipeline/test" tb "github.com/tektoncd/pipeline/test/builder" @@ -127,36 +126,36 @@ func TestPipelineResourceList(t *testing.T) { if err != nil { t.Errorf("Unexpected error: %v", err) } - tu.AssertOutput(t, strings.Join(td.expected, "\n"), out) + test.AssertOutput(t, strings.Join(td.expected, "\n"), out) }) } } func TestPipelineResourceList_empty(t *testing.T) { - cs, _ := pipelinetest.SeedTestData(t, pipelinetest.Data{}) + cs, _ := test.SeedTestData(t, pipelinetest.Data{}) p := &test.Params{Tekton: cs.Pipeline} pipelineresource := Command(p) out, _ := test.ExecuteCommand(pipelineresource, "list", "-n", "test-ns-3") - tu.AssertOutput(t, msgNoPREsFound+"\n", out) + test.AssertOutput(t, msgNoPREsFound+"\n", out) } func TestPipelineResourceList_invalidType(t *testing.T) { - cs, _ := pipelinetest.SeedTestData(t, 
pipelinetest.Data{}) + cs, _ := test.SeedTestData(t, pipelinetest.Data{}) p := &test.Params{Tekton: cs.Pipeline} c := Command(p) - _, err := tu.ExecuteCommand(c, "list", "-n", "ns", "-t", "registry") + _, err := test.ExecuteCommand(c, "list", "-n", "ns", "-t", "registry") if err == nil { t.Error("Expecting an error but it's empty") } - tu.AssertOutput(t, "Failed to list pipelineresources. Invalid resource type registry", err.Error()) + test.AssertOutput(t, "Failed to list pipelineresources. Invalid resource type registry", err.Error()) } func command(t *testing.T, pres []*v1alpha1.PipelineResource) *cobra.Command { - cs, _ := pipelinetest.SeedTestData(t, pipelinetest.Data{PipelineResources: pres}) + cs, _ := test.SeedTestData(t, pipelinetest.Data{PipelineResources: pres}) p := &test.Params{Tekton: cs.Pipeline} return Command(p) } diff --git a/pkg/cmd/pipelinerun/cancel_test.go b/pkg/cmd/pipelinerun/cancel_test.go index aaf75d935..83652851b 100644 --- a/pkg/cmd/pipelinerun/cancel_test.go +++ b/pkg/cmd/pipelinerun/cancel_test.go @@ -4,8 +4,9 @@ import ( "errors" "testing" + "github.com/tektoncd/cli/pkg/test" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - "github.com/tektoncd/pipeline/test" + pipelinetest "github.com/tektoncd/pipeline/test" tb "github.com/tektoncd/pipeline/test/builder" "k8s.io/apimachinery/pkg/runtime" k8stest "k8s.io/client-go/testing" @@ -30,7 +31,7 @@ func Test_cancel_pipelinerun(t *testing.T) { ), } - cs, _ := test.SeedTestData(t, test.Data{PipelineRuns: prs}) + cs, _ := test.SeedTestData(t, pipelinetest.Data{PipelineRuns: prs}) p := &tu.Params{Tekton: cs.Pipeline, Kube: cs.Kube} pRun := Command(p) @@ -44,7 +45,7 @@ func Test_cancel_pipelinerun_not_found(t *testing.T) { prName := "test-pipeline-run-123" - cs, _ := test.SeedTestData(t, test.Data{}) + cs, _ := test.SeedTestData(t, pipelinetest.Data{}) p := &tu.Params{Tekton: cs.Pipeline, Kube: cs.Kube} pRun := Command(p) @@ -72,7 +73,7 @@ func Test_cancel_pipelinerun_client_err(t 
*testing.T) { ), } - cs, _ := test.SeedTestData(t, test.Data{PipelineRuns: prs}) + cs, _ := test.SeedTestData(t, pipelinetest.Data{PipelineRuns: prs}) p := &tu.Params{Tekton: cs.Pipeline, Kube: cs.Kube} cs.Pipeline.PrependReactor("update", "pipelineruns", func(action k8stest.Action) (bool, runtime.Object, error) { diff --git a/pkg/cmd/pipelinerun/describe.go b/pkg/cmd/pipelinerun/describe.go index 32512855f..619f36c9b 100644 --- a/pkg/cmd/pipelinerun/describe.go +++ b/pkg/cmd/pipelinerun/describe.go @@ -63,7 +63,11 @@ No params {{- else }} NAME VALUE {{- range $i, $p := .PipelineRun.Spec.Params }} -{{ $p.Name }} {{ $p.Value }} +{{- if eq $p.Value.Type "string" }} +{{ $p.Name }} {{ $p.Value.StringVal }} +{{- else }} +{{ $p.Name }} {{ $p.Value.ArrayVal }} +{{- end }} {{- end }} {{- end }} diff --git a/pkg/cmd/pipelinerun/describe_test.go b/pkg/cmd/pipelinerun/describe_test.go index 2a789e5b9..ed694aff8 100644 --- a/pkg/cmd/pipelinerun/describe_test.go +++ b/pkg/cmd/pipelinerun/describe_test.go @@ -20,19 +20,18 @@ import ( "github.com/google/go-cmp/cmp" "github.com/jonboulle/clockwork" - "github.com/knative/pkg/apis" "github.com/tektoncd/cli/pkg/test" - tu "github.com/tektoncd/cli/pkg/test" cb "github.com/tektoncd/cli/pkg/test/builder" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" "github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/pipelinerun/resources" pipelinetest "github.com/tektoncd/pipeline/test" tb "github.com/tektoncd/pipeline/test/builder" corev1 "k8s.io/api/core/v1" + "knative.dev/pkg/apis" ) func TestPipelineRunDescribe_not_found(t *testing.T) { - cs, _ := pipelinetest.SeedTestData(t, pipelinetest.Data{}) + cs, _ := test.SeedTestData(t, pipelinetest.Data{}) p := &test.Params{Tekton: cs.Pipeline} pipelinerun := Command(p) @@ -41,7 +40,7 @@ func TestPipelineRunDescribe_not_found(t *testing.T) { t.Errorf("Expected error, did not get any") } expected := "Failed to find pipelinerun \"bar\"" - tu.AssertOutput(t, expected, err.Error()) + 
test.AssertOutput(t, expected, err.Error()) } func TestPipelineRunDescribe_only_taskrun(t *testing.T) { @@ -52,7 +51,7 @@ func TestPipelineRunDescribe_only_taskrun(t *testing.T) { tb.TaskRunStatus( tb.TaskRunStartTime(clock.Now().Add(2*time.Minute)), cb.TaskRunCompletionTime(clock.Now().Add(5*time.Minute)), - tb.Condition(apis.Condition{ + tb.StatusCondition(apis.Condition{ Type: apis.ConditionSucceeded, Status: corev1.ConditionTrue, }), @@ -60,18 +59,16 @@ func TestPipelineRunDescribe_only_taskrun(t *testing.T) { ), } - cs, _ := pipelinetest.SeedTestData(t, pipelinetest.Data{ + cs, _ := test.SeedTestData(t, pipelinetest.Data{ PipelineRuns: []*v1alpha1.PipelineRun{ tb.PipelineRun("pipeline-run", "ns", cb.PipelineRunCreationTimestamp(clock.Now()), tb.PipelineRunLabel("tekton.dev/pipeline", "pipeline"), tb.PipelineRunSpec("pipeline"), tb.PipelineRunStatus( - tb.PipelineRunTaskRunsStatus(map[string]*v1alpha1.PipelineRunTaskRunStatus{ - "tr-1": { - PipelineTaskName: "t-1", - Status: &trs[0].Status, - }, + tb.PipelineRunTaskRunsStatus("tr-1", &v1alpha1.PipelineRunTaskRunStatus{ + PipelineTaskName: "t-1", + Status: &trs[0].Status, }), tb.PipelineRunStatusCondition(apis.Condition{ Status: corev1.ConditionTrue, @@ -111,7 +108,7 @@ NAME TASK NAME STARTED DURATION STATUS tr-1 t-1 8 minutes ago 3 minutes Succeeded ` - tu.AssertOutput(t, expected, actual) + test.AssertOutput(t, expected, actual) } func TestPipelineRunDescribe_multiple_taskrun_ordering(t *testing.T) { @@ -122,7 +119,7 @@ func TestPipelineRunDescribe_multiple_taskrun_ordering(t *testing.T) { tb.TaskRunStatus( tb.TaskRunStartTime(clock.Now().Add(2*time.Minute)), cb.TaskRunCompletionTime(clock.Now().Add(5*time.Minute)), - tb.Condition(apis.Condition{ + tb.StatusCondition(apis.Condition{ Type: apis.ConditionSucceeded, Status: corev1.ConditionTrue, }), @@ -132,7 +129,7 @@ func TestPipelineRunDescribe_multiple_taskrun_ordering(t *testing.T) { tb.TaskRunStatus( tb.TaskRunStartTime(clock.Now().Add(5*time.Minute)), 
cb.TaskRunCompletionTime(clock.Now().Add(9*time.Minute)), - tb.Condition(apis.Condition{ + tb.StatusCondition(apis.Condition{ Type: apis.ConditionSucceeded, Status: corev1.ConditionTrue, }), @@ -140,22 +137,20 @@ func TestPipelineRunDescribe_multiple_taskrun_ordering(t *testing.T) { ), } - cs, _ := pipelinetest.SeedTestData(t, pipelinetest.Data{ + cs, _ := test.SeedTestData(t, pipelinetest.Data{ PipelineRuns: []*v1alpha1.PipelineRun{ tb.PipelineRun("pipeline-run", "ns", cb.PipelineRunCreationTimestamp(clock.Now()), tb.PipelineRunLabel("tekton.dev/pipeline", "pipeline"), tb.PipelineRunSpec("pipeline"), tb.PipelineRunStatus( - tb.PipelineRunTaskRunsStatus(map[string]*v1alpha1.PipelineRunTaskRunStatus{ - "tr-1": { - PipelineTaskName: "t-1", - Status: &trs[0].Status, - }, - "tr-2": { - PipelineTaskName: "t-2", - Status: &trs[1].Status, - }, + tb.PipelineRunTaskRunsStatus("tr-1", &v1alpha1.PipelineRunTaskRunStatus{ + PipelineTaskName: "t-1", + Status: &trs[0].Status, + }), + tb.PipelineRunTaskRunsStatus("tr-2", &v1alpha1.PipelineRunTaskRunStatus{ + PipelineTaskName: "t-2", + Status: &trs[1].Status, + }), tb.PipelineRunStatusCondition(apis.Condition{ Status: corev1.ConditionTrue, @@ -169,9 +164,8 @@ func TestPipelineRunDescribe_multiple_taskrun_ordering(t *testing.T) { }) p := &test.Params{Tekton: cs.Pipeline, Clock: clock} - pipelinerun := Command(p) - clock.Advance(10 * time.Minute) + clock.Advance(9 * time.Minute) actual, err := test.ExecuteCommand(pipelinerun, "desc", "pipeline-run", "-n", "ns") if err != nil { t.Errorf("Unexpected error: %v", err) @@ -181,8 +175,8 @@ Namespace: ns Pipeline Ref: pipeline Status -STARTED DURATION STATUS -10 minutes ago 15 minutes Succeeded +STARTED DURATION STATUS +9 minutes ago 15 minutes Succeeded Resources No resources @@ -192,8 +186,8 @@ No params Taskruns NAME TASK NAME STARTED DURATION STATUS -tr-2 t-2 5 minutes ago 4 minutes Succeeded -tr-1 t-1 8 minutes ago 3 minutes Succeeded +tr-2 t-2 4 minutes ago 4 minutes Succeeded +tr-1 
t-1 7 minutes ago 3 minutes Succeeded ` if d := cmp.Diff(expected, actual); d != "" { t.Errorf("Unexpected output mismatch: %s", d) @@ -209,7 +203,7 @@ func TestPipelineRunDescribe_failed(t *testing.T) { tb.TaskRunStatus( tb.TaskRunStartTime(clock.Now().Add(2*time.Minute)), cb.TaskRunCompletionTime(clock.Now().Add(5*time.Minute)), - tb.Condition(apis.Condition{ + tb.StatusCondition(apis.Condition{ Status: corev1.ConditionFalse, Reason: resources.ReasonFailed, Message: "Testing tr failed", @@ -218,7 +212,7 @@ func TestPipelineRunDescribe_failed(t *testing.T) { ), } - cs, _ := pipelinetest.SeedTestData(t, pipelinetest.Data{ + cs, _ := test.SeedTestData(t, pipelinetest.Data{ PipelineRuns: []*v1alpha1.PipelineRun{ tb.PipelineRun("pipeline-run", "ns", cb.PipelineRunCreationTimestamp(clock.Now()), @@ -227,11 +221,9 @@ func TestPipelineRunDescribe_failed(t *testing.T) { tb.PipelineRunServiceAccount("test-sa"), ), tb.PipelineRunStatus( - tb.PipelineRunTaskRunsStatus(map[string]*v1alpha1.PipelineRunTaskRunStatus{ - "tr-1": { - PipelineTaskName: "t-1", - Status: &trs[0].Status, - }, + tb.PipelineRunTaskRunsStatus("tr-1", &v1alpha1.PipelineRunTaskRunStatus{ + PipelineTaskName: "t-1", + Status: &trs[0].Status, }), tb.PipelineRunStatusCondition(apis.Condition{ Status: corev1.ConditionFalse, @@ -276,7 +268,7 @@ NAME TASK NAME STARTED DURATION STATUS tr-1 t-1 8 minutes ago 3 minutes Failed ` - tu.AssertOutput(t, expected, actual) + test.AssertOutput(t, expected, actual) } func TestPipelineRunDescribe_with_resources_taskrun(t *testing.T) { @@ -287,7 +279,7 @@ func TestPipelineRunDescribe_with_resources_taskrun(t *testing.T) { tb.TaskRunStatus( tb.TaskRunStartTime(clock.Now().Add(2*time.Minute)), cb.TaskRunCompletionTime(clock.Now().Add(5*time.Minute)), - tb.Condition(apis.Condition{ + tb.StatusCondition(apis.Condition{ Type: apis.ConditionSucceeded, Status: corev1.ConditionTrue, }), @@ -295,7 +287,7 @@ func TestPipelineRunDescribe_with_resources_taskrun(t *testing.T) { ), } - cs, 
_ := pipelinetest.SeedTestData(t, pipelinetest.Data{ + cs, _ := test.SeedTestData(t, pipelinetest.Data{ PipelineRuns: []*v1alpha1.PipelineRun{ tb.PipelineRun("pipeline-run", "ns", cb.PipelineRunCreationTimestamp(clock.Now()), @@ -308,11 +300,9 @@ func TestPipelineRunDescribe_with_resources_taskrun(t *testing.T) { ), ), tb.PipelineRunStatus( - tb.PipelineRunTaskRunsStatus(map[string]*v1alpha1.PipelineRunTaskRunStatus{ - "tr-1": { - PipelineTaskName: "t-1", - Status: &trs[0].Status, - }, + tb.PipelineRunTaskRunsStatus("tr-1", &v1alpha1.PipelineRunTaskRunStatus{ + PipelineTaskName: "t-1", + Status: &trs[0].Status, }), tb.PipelineRunStatusCondition(apis.Condition{ Status: corev1.ConditionTrue, @@ -355,13 +345,13 @@ NAME TASK NAME STARTED DURATION STATUS tr-1 t-1 8 minutes ago 3 minutes Succeeded ` - tu.AssertOutput(t, expected, actual) + test.AssertOutput(t, expected, actual) } func TestPipelineRunDescribe_without_start_time(t *testing.T) { clock := clockwork.NewFakeClock() - cs, _ := pipelinetest.SeedTestData(t, pipelinetest.Data{ + cs, _ := test.SeedTestData(t, pipelinetest.Data{ PipelineRuns: []*v1alpha1.PipelineRun{ tb.PipelineRun("pipeline-run", "ns", cb.PipelineRunCreationTimestamp(clock.Now()), @@ -398,5 +388,5 @@ Taskruns No taskruns ` - tu.AssertOutput(t, expected, actual) + test.AssertOutput(t, expected, actual) } diff --git a/pkg/cmd/pipelinerun/list_test.go b/pkg/cmd/pipelinerun/list_test.go index 058829d8a..7d1076101 100644 --- a/pkg/cmd/pipelinerun/list_test.go +++ b/pkg/cmd/pipelinerun/list_test.go @@ -21,16 +21,15 @@ import ( "time" "github.com/jonboulle/clockwork" - "github.com/knative/pkg/apis" "github.com/spf13/cobra" "github.com/tektoncd/cli/pkg/test" - tu "github.com/tektoncd/cli/pkg/test" cb "github.com/tektoncd/cli/pkg/test/builder" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" "github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/pipelinerun/resources" pipelinetest "github.com/tektoncd/pipeline/test" tb 
"github.com/tektoncd/pipeline/test/builder" corev1 "k8s.io/api/core/v1" + "knative.dev/pkg/apis" ) func TestListPipelineRuns(t *testing.T) { @@ -184,16 +183,16 @@ func TestListPipelineRuns(t *testing.T) { } func TestListPipeline_empty(t *testing.T) { - cs, _ := pipelinetest.SeedTestData(t, pipelinetest.Data{}) - p := &tu.Params{Tekton: cs.Pipeline} + cs, _ := test.SeedTestData(t, pipelinetest.Data{}) + p := &test.Params{Tekton: cs.Pipeline} pipeline := Command(p) - output, err := tu.ExecuteCommand(pipeline, "list", "-n", "ns") + output, err := test.ExecuteCommand(pipeline, "list", "-n", "ns") if err != nil { t.Errorf("Unexpected error: %v", err) } - tu.AssertOutput(t, emptyMsg+"\n", output) + test.AssertOutput(t, emptyMsg+"\n", output) } func command(t *testing.T, prs []*v1alpha1.PipelineRun, now time.Time) *cobra.Command { @@ -201,7 +200,7 @@ func command(t *testing.T, prs []*v1alpha1.PipelineRun, now time.Time) *cobra.Co clock := clockwork.NewFakeClockAt(now) clock.Advance(time.Duration(60) * time.Minute) - cs, _ := pipelinetest.SeedTestData(t, pipelinetest.Data{PipelineRuns: prs}) + cs, _ := test.SeedTestData(t, pipelinetest.Data{PipelineRuns: prs}) p := &test.Params{Tekton: cs.Pipeline, Clock: clock} diff --git a/pkg/cmd/pipelinerun/log_test.go b/pkg/cmd/pipelinerun/log_test.go index af37145f4..5c0166bc3 100644 --- a/pkg/cmd/pipelinerun/log_test.go +++ b/pkg/cmd/pipelinerun/log_test.go @@ -21,23 +21,23 @@ import ( "time" "github.com/jonboulle/clockwork" - "github.com/knative/pkg/apis" "github.com/tektoncd/cli/pkg/cli" "github.com/tektoncd/cli/pkg/helper/pods/fake" "github.com/tektoncd/cli/pkg/helper/pods/stream" - tu "github.com/tektoncd/cli/pkg/test" + "github.com/tektoncd/cli/pkg/test" cb "github.com/tektoncd/cli/pkg/test/builder" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" "github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/pipelinerun/resources" - "github.com/tektoncd/pipeline/test" + pipelinetest "github.com/tektoncd/pipeline/test" tb 
"github.com/tektoncd/pipeline/test/builder" corev1 "k8s.io/api/core/v1" + "knative.dev/pkg/apis" ) func TestLog_no_pipelinerun_argument(t *testing.T) { - c := Command(&tu.Params{}) + c := Command(&test.Params{}) - _, err := tu.ExecuteCommand(c, "logs", "-n", "ns") + _, err := test.ExecuteCommand(c, "logs", "-n", "ns") if err == nil { t.Error("Expecting an error but it's empty") @@ -56,13 +56,13 @@ func TestLog_missing_pipelinerun(t *testing.T) { ), ), } - cs, _ := test.SeedTestData(t, test.Data{PipelineRuns: pr}) - p := &tu.Params{Tekton: cs.Pipeline, Kube: cs.Kube} + cs, _ := test.SeedTestData(t, pipelinetest.Data{PipelineRuns: pr}) + p := &test.Params{Tekton: cs.Pipeline, Kube: cs.Kube} c := Command(p) - _, err := tu.ExecuteCommand(c, "logs", "output-pipeline-2", "-n", "ns") + _, err := test.ExecuteCommand(c, "logs", "output-pipeline-2", "-n", "ns") expected := "pipelineruns.tekton.dev \"output-pipeline-2\" not found" - tu.AssertOutput(t, expected, err.Error()) + test.AssertOutput(t, expected, err.Error()) } func TestPipelinerunLogs(t *testing.T) { @@ -97,7 +97,7 @@ func TestPipelinerunLogs(t *testing.T) { tb.TaskRunStatus( tb.PodName(tr1Pod), tb.TaskRunStartTime(tr1StartTime), - tb.Condition(apis.Condition{ + tb.StatusCondition(apis.Condition{ Type: apis.ConditionSucceeded, Status: corev1.ConditionTrue, }), @@ -121,7 +121,7 @@ func TestPipelinerunLogs(t *testing.T) { tb.TaskRunStatus( tb.PodName(tr2Pod), tb.TaskRunStartTime(tr2StartTime), - tb.Condition(apis.Condition{ + tb.StatusCondition(apis.Condition{ Type: apis.ConditionSucceeded, Status: corev1.ConditionTrue, }), @@ -140,17 +140,6 @@ func TestPipelinerunLogs(t *testing.T) { ), } - prtrs := map[string]*v1alpha1.PipelineRunTaskRunStatus{ - tr1Name: { - PipelineTaskName: task1Name, - Status: &trs[0].Status, - }, - tr2Name: { - PipelineTaskName: task2Name, - Status: &trs[1].Status, - }, - } - prs := []*v1alpha1.PipelineRun{ tb.PipelineRun(prName, ns, tb.PipelineRunLabel("tekton.dev/pipeline", prName), @@ 
-159,9 +148,14 @@ func TestPipelinerunLogs(t *testing.T) { Status: corev1.ConditionTrue, Reason: resources.ReasonSucceeded, }), - tb.PipelineRunTaskRunsStatus( - prtrs, - ), + tb.PipelineRunTaskRunsStatus(tr1Name, &v1alpha1.PipelineRunTaskRunStatus{ + PipelineTaskName: task1Name, + Status: &trs[0].Status, + }), + tb.PipelineRunTaskRunsStatus(tr2Name, &v1alpha1.PipelineRunTaskRunStatus{ + PipelineTaskName: task2Name, + Status: &trs[1].Status, + }), ), ), } @@ -250,14 +244,14 @@ func TestPipelinerunLogs(t *testing.T) { for _, s := range scenarios { t.Run(s.name, func(t *testing.T) { - cs, _ := test.SeedTestData(t, test.Data{PipelineRuns: prs, Pipelines: pps, TaskRuns: trs, Pods: p}) + cs, _ := test.SeedTestData(t, pipelinetest.Data{PipelineRuns: prs, Pipelines: pps, TaskRuns: trs, Pods: p}) prlo := logOpts(prName, ns, cs, fake.Streamer(fakeLogs), s.allSteps, false, s.tasks...) output, _ := fetchLogs(prlo) expected := strings.Join(s.expectedLogs, "\n") + "\n" - tu.AssertOutput(t, expected, output) + test.AssertOutput(t, expected, output) }) } } @@ -293,7 +287,7 @@ func TestPipelinerunLog_completed_taskrun_only(t *testing.T) { tb.TaskRunStatus( tb.PodName(tr1Pod), tb.TaskRunStartTime(tr1StartTime), - tb.Condition(apis.Condition{ + tb.StatusCondition(apis.Condition{ Type: apis.ConditionSucceeded, Status: corev1.ConditionTrue, }), @@ -312,13 +306,6 @@ func TestPipelinerunLog_completed_taskrun_only(t *testing.T) { ), } - prtrs := map[string]*v1alpha1.PipelineRunTaskRunStatus{ - tr1Name: { - PipelineTaskName: task1Name, - Status: &trs[0].Status, - }, - } - prs := []*v1alpha1.PipelineRun{ tb.PipelineRun(prName, ns, tb.PipelineRunLabel("tekton.dev/pipeline", prName), @@ -327,9 +314,10 @@ func TestPipelinerunLog_completed_taskrun_only(t *testing.T) { Status: corev1.ConditionTrue, Reason: resources.ReasonRunning, }), - tb.PipelineRunTaskRunsStatus( - prtrs, - ), + tb.PipelineRunTaskRunsStatus(tr1Name, &v1alpha1.PipelineRunTaskRunStatus{ + PipelineTaskName: task1Name, + Status: 
&trs[0].Status, + }), ), ), } @@ -362,7 +350,7 @@ func TestPipelinerunLog_completed_taskrun_only(t *testing.T) { ), ) - cs, _ := test.SeedTestData(t, test.Data{PipelineRuns: prs, Pipelines: pps, TaskRuns: trs, Pods: p}) + cs, _ := test.SeedTestData(t, pipelinetest.Data{PipelineRuns: prs, Pipelines: pps, TaskRuns: trs, Pods: p}) prlo := logOpts(prName, ns, cs, fake.Streamer(fakeLogStream), false, false) output, _ := fetchLogs(prlo) @@ -372,7 +360,7 @@ func TestPipelinerunLog_completed_taskrun_only(t *testing.T) { } expected := strings.Join(expectedLogs, "\n") + "\n" - tu.AssertOutput(t, expected, output) + test.AssertOutput(t, expected, output) } func TestPipelinerunLog_follow_mode(t *testing.T) { @@ -397,7 +385,7 @@ func TestPipelinerunLog_follow_mode(t *testing.T) { tb.TaskRunStatus( tb.PodName(tr1Pod), tb.TaskRunStartTime(tr1StartTime), - tb.Condition(apis.Condition{ + tb.StatusCondition(apis.Condition{ Type: apis.ConditionSucceeded, Status: corev1.ConditionTrue, }), @@ -416,13 +404,6 @@ func TestPipelinerunLog_follow_mode(t *testing.T) { ), } - prtrs := map[string]*v1alpha1.PipelineRunTaskRunStatus{ - tr1Name: { - PipelineTaskName: task1Name, - Status: &trs[0].Status, - }, - } - prs := []*v1alpha1.PipelineRun{ tb.PipelineRun(prName, ns, tb.PipelineRunLabel("tekton.dev/pipeline", prName), @@ -431,9 +412,10 @@ func TestPipelinerunLog_follow_mode(t *testing.T) { Status: corev1.ConditionTrue, Reason: resources.ReasonRunning, }), - tb.PipelineRunTaskRunsStatus( - prtrs, - ), + tb.PipelineRunTaskRunsStatus(tr1Name, &v1alpha1.PipelineRunTaskRunStatus{ + PipelineTaskName: task1Name, + Status: &trs[0].Status, + }), ), ), } @@ -471,7 +453,7 @@ func TestPipelinerunLog_follow_mode(t *testing.T) { ), ) - cs, _ := test.SeedTestData(t, test.Data{PipelineRuns: prs, Pipelines: pps, TaskRuns: trs, Pods: p}) + cs, _ := test.SeedTestData(t, pipelinetest.Data{PipelineRuns: prs, Pipelines: pps, TaskRuns: trs, Pods: p}) prlo := logOpts(prName, ns, cs, fake.Streamer(fakeLogStream), 
false, true) output, _ := fetchLogs(prlo) @@ -484,11 +466,11 @@ func TestPipelinerunLog_follow_mode(t *testing.T) { } expected := strings.Join(expectedLogs, "\n") + "\n" - tu.AssertOutput(t, expected, output) + test.AssertOutput(t, expected, output) } -func logOpts(name string, ns string, cs test.Clients, streamer stream.NewStreamerFunc, allSteps bool, follow bool, onlyTasks ...string) *LogOptions { - p := tu.Params{ +func logOpts(name string, ns string, cs pipelinetest.Clients, streamer stream.NewStreamerFunc, allSteps bool, follow bool, onlyTasks ...string) *LogOptions { + p := test.Params{ Kube: cs.Kube, Tekton: cs.Pipeline, } diff --git a/pkg/cmd/task/list_test.go b/pkg/cmd/task/list_test.go index e03e1514f..45e895f93 100644 --- a/pkg/cmd/task/list_test.go +++ b/pkg/cmd/task/list_test.go @@ -22,7 +22,6 @@ import ( "github.com/google/go-cmp/cmp" "github.com/jonboulle/clockwork" "github.com/tektoncd/cli/pkg/test" - tu "github.com/tektoncd/cli/pkg/test" cb "github.com/tektoncd/cli/pkg/test/builder" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" pipelinetest "github.com/tektoncd/pipeline/test" @@ -30,7 +29,7 @@ import ( ) func TestTaskList_Empty(t *testing.T) { - cs, _ := pipelinetest.SeedTestData(t, pipelinetest.Data{}) + cs, _ := test.SeedTestData(t, pipelinetest.Data{}) p := &test.Params{Tekton: cs.Pipeline} task := Command(p) @@ -38,7 +37,7 @@ func TestTaskList_Empty(t *testing.T) { if err != nil { t.Errorf("Unexpected error: %v", err) } - tu.AssertOutput(t, emptyMsg+"\n", output) + test.AssertOutput(t, emptyMsg+"\n", output) } func TestTaskListOnlyTasks(t *testing.T) { @@ -49,7 +48,7 @@ func TestTaskListOnlyTasks(t *testing.T) { tb.Task("bananas", "namespace", cb.TaskCreationTime(clock.Now().Add(-512*time.Hour))), } - cs, _ := pipelinetest.SeedTestData(t, pipelinetest.Data{Tasks: tasks}) + cs, _ := test.SeedTestData(t, pipelinetest.Data{Tasks: tasks}) p := &test.Params{Tekton: cs.Pipeline, Clock: clock} task := Command(p) diff --git 
a/pkg/cmd/taskrun/list_test.go b/pkg/cmd/taskrun/list_test.go index cbe945a59..52cdd1b1d 100644 --- a/pkg/cmd/taskrun/list_test.go +++ b/pkg/cmd/taskrun/list_test.go @@ -21,7 +21,6 @@ import ( "time" "github.com/jonboulle/clockwork" - "github.com/knative/pkg/apis" "github.com/spf13/cobra" "github.com/tektoncd/cli/pkg/test" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" @@ -30,6 +29,7 @@ import ( tb "github.com/tektoncd/pipeline/test/builder" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" ) func TestListTaskRuns(t *testing.T) { @@ -42,7 +42,7 @@ func TestListTaskRuns(t *testing.T) { tb.TaskRunLabel("tekton.dev/task", "random"), tb.TaskRunSpec(tb.TaskRunTaskRef("random")), tb.TaskRunStatus( - tb.Condition(apis.Condition{ + tb.StatusCondition(apis.Condition{ Status: corev1.ConditionTrue, Reason: resources.ReasonSucceeded, }), @@ -52,7 +52,7 @@ func TestListTaskRuns(t *testing.T) { tb.TaskRunLabel("tekton.dev/task", "bar"), tb.TaskRunSpec(tb.TaskRunTaskRef("bar")), tb.TaskRunStatus( - tb.Condition(apis.Condition{ + tb.StatusCondition(apis.Condition{ Status: corev1.ConditionTrue, Reason: resources.ReasonSucceeded, }), @@ -64,7 +64,7 @@ func TestListTaskRuns(t *testing.T) { tb.TaskRunLabel("tekton.dev/Task", "random"), tb.TaskRunSpec(tb.TaskRunTaskRef("random")), tb.TaskRunStatus( - tb.Condition(apis.Condition{ + tb.StatusCondition(apis.Condition{ Status: corev1.ConditionUnknown, Reason: resources.ReasonRunning, }), @@ -75,7 +75,7 @@ func TestListTaskRuns(t *testing.T) { tb.TaskRunLabel("tekton.dev/Task", "random"), tb.TaskRunSpec(tb.TaskRunTaskRef("random")), tb.TaskRunStatus( - tb.Condition(apis.Condition{ + tb.StatusCondition(apis.Condition{ Status: corev1.ConditionFalse, Reason: resources.ReasonFailed, }), @@ -87,7 +87,7 @@ func TestListTaskRuns(t *testing.T) { tb.TaskRunLabel("tekton.dev/Task", "random"), tb.TaskRunSpec(tb.TaskRunTaskRef("random")), tb.TaskRunStatus( - tb.Condition(apis.Condition{ + 
tb.StatusCondition(apis.Condition{ Status: corev1.ConditionFalse, Reason: resources.ReasonFailed, }), @@ -233,7 +233,7 @@ func command(t *testing.T, trs []*v1alpha1.TaskRun, now time.Time) *cobra.Comman clock := clockwork.NewFakeClockAt(now) clock.Advance(time.Duration(60) * time.Minute) - cs, _ := pipelinetest.SeedTestData(t, pipelinetest.Data{TaskRuns: trs}) + cs, _ := test.SeedTestData(t, pipelinetest.Data{TaskRuns: trs}) p := &test.Params{Tekton: cs.Pipeline, Clock: clock} diff --git a/pkg/cmd/taskrun/logs_test.go b/pkg/cmd/taskrun/logs_test.go index e5988687f..c25df4fa0 100644 --- a/pkg/cmd/taskrun/logs_test.go +++ b/pkg/cmd/taskrun/logs_test.go @@ -21,24 +21,24 @@ import ( "time" "github.com/jonboulle/clockwork" - "github.com/knative/pkg/apis" "github.com/tektoncd/cli/pkg/cli" "github.com/tektoncd/cli/pkg/helper/pods/fake" "github.com/tektoncd/cli/pkg/helper/pods/stream" - tu "github.com/tektoncd/cli/pkg/test" + "github.com/tektoncd/cli/pkg/test" cb "github.com/tektoncd/cli/pkg/test/builder" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - "github.com/tektoncd/pipeline/test" + pipelinetest "github.com/tektoncd/pipeline/test" tb "github.com/tektoncd/pipeline/test/builder" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/watch" k8stest "k8s.io/client-go/testing" + "knative.dev/pkg/apis" ) func TestLog_no_taskrun_arg(t *testing.T) { - c := Command(&tu.Params{}) + c := Command(&test.Params{}) - _, err := tu.ExecuteCommand(c, "logs", "-n", "ns") + _, err := test.ExecuteCommand(c, "logs", "-n", "ns") if err == nil { t.Error("Expecting an error but it's empty") } @@ -48,15 +48,15 @@ func TestLog_missing_taskrun(t *testing.T) { tr := []*v1alpha1.TaskRun{ tb.TaskRun("output-taskrun-1", "ns"), } - cs, _ := test.SeedTestData(t, test.Data{TaskRuns: tr}) + cs, _ := test.SeedTestData(t, pipelinetest.Data{TaskRuns: tr}) watcher := watch.NewFake() cs.Kube.PrependWatchReactor("pods", k8stest.DefaultWatchReactor(watcher, nil)) - p := &tu.Params{Tekton: 
cs.Pipeline, Kube: cs.Kube} + p := &test.Params{Tekton: cs.Pipeline, Kube: cs.Kube} c := Command(p) - got, _ := tu.ExecuteCommand(c, "logs", "output-taskrun-2", "-n", "ns") + got, _ := test.ExecuteCommand(c, "logs", "output-taskrun-2", "-n", "ns") expected := "Error: " + msgTRNotFoundErr + " : taskruns.tekton.dev \"output-taskrun-2\" not found\n" - tu.AssertOutput(t, expected, got) + test.AssertOutput(t, expected, got) } func TestLog_taskrun_logs(t *testing.T) { @@ -75,7 +75,7 @@ func TestLog_taskrun_logs(t *testing.T) { tb.TaskRunStatus( tb.PodName(trPod), tb.TaskRunStartTime(trStartTime), - tb.Condition(apis.Condition{ + tb.StatusCondition(apis.Condition{ Type: apis.ConditionSucceeded, Status: corev1.ConditionTrue, }), @@ -112,7 +112,7 @@ func TestLog_taskrun_logs(t *testing.T) { ), ) - cs, _ := test.SeedTestData(t, test.Data{TaskRuns: trs, Pods: ps}) + cs, _ := test.SeedTestData(t, pipelinetest.Data{TaskRuns: trs, Pods: ps}) trlo := logOpts(trName, ns, cs, fake.Streamer(logs), false, false) output, _ := fetchLogs(trlo) @@ -122,7 +122,7 @@ func TestLog_taskrun_logs(t *testing.T) { } expected := strings.Join(expectedLogs, "\n") + "\n" - tu.AssertOutput(t, expected, output) + test.AssertOutput(t, expected, output) } func TestLog_taskrun_all_steps(t *testing.T) { @@ -145,7 +145,7 @@ func TestLog_taskrun_all_steps(t *testing.T) { tb.TaskRunStatus( tb.PodName(trPod), tb.TaskRunStartTime(trStartTime), - tb.Condition(apis.Condition{ + tb.StatusCondition(apis.Condition{ Type: apis.ConditionSucceeded, Status: corev1.ConditionTrue, }), @@ -189,7 +189,7 @@ func TestLog_taskrun_all_steps(t *testing.T) { ), ) - cs, _ := test.SeedTestData(t, test.Data{TaskRuns: trs, Pods: p}) + cs, _ := test.SeedTestData(t, pipelinetest.Data{TaskRuns: trs, Pods: p}) trl := logOpts(trName, ns, cs, fake.Streamer(logs), true, false) output, _ := fetchLogs(trl) @@ -202,7 +202,7 @@ func TestLog_taskrun_all_steps(t *testing.T) { } expected := strings.Join(expectedLogs, "\n") + "\n" - 
tu.AssertOutput(t, expected, output) + test.AssertOutput(t, expected, output) } func TestLog_taskrun_follow_mode(t *testing.T) { @@ -224,7 +224,7 @@ func TestLog_taskrun_follow_mode(t *testing.T) { tb.TaskRunStatus( tb.PodName(trPod), tb.TaskRunStartTime(trStartTime), - tb.Condition(apis.Condition{ + tb.StatusCondition(apis.Condition{ Type: apis.ConditionSucceeded, Status: corev1.ConditionTrue, }), @@ -268,7 +268,7 @@ func TestLog_taskrun_follow_mode(t *testing.T) { ), ) - cs, _ := test.SeedTestData(t, test.Data{TaskRuns: trs, Pods: p}) + cs, _ := test.SeedTestData(t, pipelinetest.Data{TaskRuns: trs, Pods: p}) trlo := logOpts(trName, ns, cs, fake.Streamer(logs), false, true) output, _ := fetchLogs(trlo) @@ -279,11 +279,11 @@ func TestLog_taskrun_follow_mode(t *testing.T) { } expected := strings.Join(expectedLogs, "\n") + "\n" - tu.AssertOutput(t, expected, output) + test.AssertOutput(t, expected, output) } -func logOpts(run, ns string, cs test.Clients, streamer stream.NewStreamerFunc, allSteps bool, follow bool) *LogOptions { - p := tu.Params{ +func logOpts(run, ns string, cs pipelinetest.Clients, streamer stream.NewStreamerFunc, allSteps bool, follow bool) *LogOptions { + p := test.Params{ Kube: cs.Kube, Tekton: cs.Pipeline, } diff --git a/pkg/formatted/k8s.go b/pkg/formatted/k8s.go index 490b74046..ccf3aaa12 100644 --- a/pkg/formatted/k8s.go +++ b/pkg/formatted/k8s.go @@ -1,12 +1,12 @@ package formatted import ( - "github.com/knative/pkg/apis" corev1 "k8s.io/api/core/v1" + "knative.dev/pkg/apis/duck/v1beta1" ) // Condition returns a human readable text based on the status of the Condition -func Condition(c []apis.Condition) string { +func Condition(c v1beta1.Conditions) string { var status string if len(c) == 0 { diff --git a/pkg/helper/pipelinerun/tracker_test.go b/pkg/helper/pipelinerun/tracker_test.go index 623fb74cb..f4327d016 100644 --- a/pkg/helper/pipelinerun/tracker_test.go +++ b/pkg/helper/pipelinerun/tracker_test.go @@ -18,17 +18,18 @@ import ( 
"testing" "time" - "github.com/knative/pkg/apis" trh "github.com/tektoncd/cli/pkg/helper/taskrun" + "github.com/tektoncd/cli/pkg/test" clitest "github.com/tektoncd/cli/pkg/test" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" "github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/pipelinerun/resources" - "github.com/tektoncd/pipeline/test" + pipelinetest "github.com/tektoncd/pipeline/test" tb "github.com/tektoncd/pipeline/test/builder" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/watch" k8stest "k8s.io/client-go/testing" + "knative.dev/pkg/apis" ) func TestTracker_pipelinerun_complete(t *testing.T) { @@ -99,13 +100,6 @@ func TestTracker_pipelinerun_complete(t *testing.T) { ), } - initialTRStatus := map[string]*v1alpha1.PipelineRunTaskRunStatus{ - tr1Name: { - PipelineTaskName: task1Name, - Status: &taskruns[0].Status, - }, - } - initialPR := []*v1alpha1.PipelineRun{ tb.PipelineRun(prName, ns, tb.PipelineRunLabel("tekton.dev/pipeline", prName), @@ -114,27 +108,32 @@ func TestTracker_pipelinerun_complete(t *testing.T) { Status: corev1.ConditionUnknown, Reason: resources.ReasonRunning, }), - tb.PipelineRunTaskRunsStatus( - initialTRStatus, - ), + tb.PipelineRunTaskRunsStatus(tr1Name, &v1alpha1.PipelineRunTaskRunStatus{ + PipelineTaskName: task1Name, + Status: &taskruns[0].Status, + }), ), ), } - finalTRStatus := map[string]*v1alpha1.PipelineRunTaskRunStatus{ - tr1Name: { + prStatusFn := tb.PipelineRunStatus( + tb.PipelineRunStatusCondition(apis.Condition{ + Status: corev1.ConditionTrue, + Reason: resources.ReasonSucceeded, + }), + tb.PipelineRunTaskRunsStatus(tr1Name, &v1alpha1.PipelineRunTaskRunStatus{ PipelineTaskName: task1Name, Status: &taskruns[0].Status, - }, - tr2Name: { + }), + tb.PipelineRunTaskRunsStatus(tr2Name, &v1alpha1.PipelineRunTaskRunStatus{ PipelineTaskName: task2Name, Status: &taskruns[1].Status, - }, - } + }), + ) + pr := &v1alpha1.PipelineRun{} + prStatusFn(pr) - 
finalPRStatus := prStatus(corev1.ConditionTrue, resources.ReasonSucceeded, finalTRStatus) - - tc := startPipelineRun(t, test.Data{PipelineRuns: initialPR, TaskRuns: taskruns}, finalPRStatus) + tc := startPipelineRun(t, pipelinetest.Data{PipelineRuns: initialPR, TaskRuns: taskruns}, pr.Status) tracker := NewTracker(pipelineName, ns, tc) output := taskRunsFor(s.tasks, tracker) @@ -142,22 +141,6 @@ func TestTracker_pipelinerun_complete(t *testing.T) { } } -func prStatus(status corev1.ConditionStatus, reason string, trStatus map[string]*v1alpha1.PipelineRunTaskRunStatus) v1alpha1.PipelineRunStatus { - s := tb.PipelineRunStatus( - tb.PipelineRunStatusCondition(apis.Condition{ - Status: status, - Reason: reason, - }), - tb.PipelineRunTaskRunsStatus( - trStatus, - ), - ) - - pr := &v1alpha1.PipelineRun{} - s(pr) - return pr.Status -} - func taskRunsFor(onlyTasks []string, tracker *Tracker) []trh.Run { output := []trh.Run{} for ts := range tracker.Monitor(onlyTasks) { @@ -166,7 +149,7 @@ func taskRunsFor(onlyTasks []string, tracker *Tracker) []trh.Run { return output } -func startPipelineRun(t *testing.T, data test.Data, prStatus ...v1alpha1.PipelineRunStatus) versioned.Interface { +func startPipelineRun(t *testing.T, data pipelinetest.Data, prStatus ...v1alpha1.PipelineRunStatus) versioned.Interface { cs, _ := test.SeedTestData(t, data) // to keep pushing the taskrun over the period(simulate watch) diff --git a/pkg/helper/pods/container_test.go b/pkg/helper/pods/container_test.go index 3c437514f..9689345bc 100644 --- a/pkg/helper/pods/container_test.go +++ b/pkg/helper/pods/container_test.go @@ -46,7 +46,7 @@ func TestContainer_fetch_logs(t *testing.T) { ), ) - cs, _ := pipelinetest.SeedTestData(t, pipelinetest.Data{Pods: ps}) + cs, _ := test.SeedTestData(t, pipelinetest.Data{Pods: ps}) pod := New(podName, ns, cs.Kube, fake.Streamer(logs)) diff --git a/pkg/helper/pods/pod_test.go b/pkg/helper/pods/pod_test.go index 9c74ca778..3f2728b30 100644 --- 
a/pkg/helper/pods/pod_test.go +++ b/pkg/helper/pods/pod_test.go @@ -4,8 +4,9 @@ import ( "testing" "time" + "github.com/tektoncd/cli/pkg/test" cb "github.com/tektoncd/cli/pkg/test/builder" - "github.com/tektoncd/pipeline/test" + pipelinetest "github.com/tektoncd/pipeline/test" tb "github.com/tektoncd/pipeline/test/builder" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -135,7 +136,7 @@ func simulateAddWatch(t *testing.T, initial *corev1.Pod, later *corev1.Pod) k8s. initial, } - clients, _ := test.SeedTestData(t, test.Data{Pods: ps}) + clients, _ := test.SeedTestData(t, pipelinetest.Data{Pods: ps}) watcher := watch.NewFake() clients.Kube.PrependWatchReactor("pods", k8stest.DefaultWatchReactor(watcher, nil)) @@ -152,7 +153,7 @@ func simulateDeleteWatch(t *testing.T, initial *corev1.Pod, later *corev1.Pod) k initial, } - clients, _ := test.SeedTestData(t, test.Data{Pods: ps}) + clients, _ := test.SeedTestData(t, pipelinetest.Data{Pods: ps}) watcher := watch.NewFake() clients.Kube.PrependWatchReactor("pods", k8stest.DefaultWatchReactor(watcher, nil)) diff --git a/pkg/test/helpers.go b/pkg/test/helpers.go index 49ec8c534..4059d4cdc 100644 --- a/pkg/test/helpers.go +++ b/pkg/test/helpers.go @@ -18,8 +18,15 @@ import ( "testing" "github.com/google/go-cmp/cmp" + "github.com/tektoncd/pipeline/test" + rtesting "knative.dev/pkg/reconciler/testing" ) +func SeedTestData(t *testing.T, d test.Data) (test.Clients, test.Informers) { + ctx, _ := rtesting.SetupFakeContext(t) + return test.SeedTestData(t, ctx, d) +} + func AssertOutput(t *testing.T, expected, actual interface{}) { t.Helper() diff := cmp.Diff(actual, expected) diff --git a/vendor/contrib.go.opencensus.io/exporter/prometheus/.gitignore b/vendor/contrib.go.opencensus.io/exporter/prometheus/.gitignore new file mode 100644 index 000000000..85e7c1dfc --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/prometheus/.gitignore @@ -0,0 +1 @@ +/.idea/ diff --git 
a/vendor/contrib.go.opencensus.io/exporter/prometheus/.travis.yml b/vendor/contrib.go.opencensus.io/exporter/prometheus/.travis.yml new file mode 100644 index 000000000..957a893db --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/prometheus/.travis.yml @@ -0,0 +1,17 @@ +language: go + +go_import_path: contrib.go.opencensus.io + +go: + - 1.11.x + +env: + global: + GO111MODULE=on + +before_script: + - make install-tools + +script: + - make travis-ci + diff --git a/vendor/github.com/knative/pkg/LICENSE b/vendor/contrib.go.opencensus.io/exporter/prometheus/LICENSE similarity index 100% rename from vendor/github.com/knative/pkg/LICENSE rename to vendor/contrib.go.opencensus.io/exporter/prometheus/LICENSE diff --git a/vendor/contrib.go.opencensus.io/exporter/prometheus/Makefile b/vendor/contrib.go.opencensus.io/exporter/prometheus/Makefile new file mode 100644 index 000000000..2e11d225c --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/prometheus/Makefile @@ -0,0 +1,95 @@ +# TODO: Fix this on windows. +ALL_SRC := $(shell find . -name '*.go' \ + -not -path './vendor/*' \ + -not -path '*/gen-go/*' \ + -type f | sort) +ALL_PKGS := $(shell go list $(sort $(dir $(ALL_SRC)))) + +GOTEST_OPT?=-v -race -timeout 30s +GOTEST_OPT_WITH_COVERAGE = $(GOTEST_OPT) -coverprofile=coverage.txt -covermode=atomic +GOTEST=go test +GOFMT=gofmt +GOLINT=golint +GOVET=go vet +EMBEDMD=embedmd +# TODO decide if we need to change these names. +README_FILES := $(shell find . 
-name '*README.md' | sort | tr '\n' ' ') + + +.DEFAULT_GOAL := fmt-lint-vet-embedmd-test + +.PHONY: fmt-lint-vet-embedmd-test +fmt-lint-vet-embedmd-test: fmt lint vet embedmd test + +# TODO enable test-with-coverage in tavis +.PHONY: travis-ci +travis-ci: fmt lint vet embedmd test test-386 + +all-pkgs: + @echo $(ALL_PKGS) | tr ' ' '\n' | sort + +all-srcs: + @echo $(ALL_SRC) | tr ' ' '\n' | sort + +.PHONY: test +test: + $(GOTEST) $(GOTEST_OPT) $(ALL_PKGS) + +.PHONY: test-386 +test-386: + GOARCH=386 $(GOTEST) -v -timeout 30s $(ALL_PKGS) + +.PHONY: test-with-coverage +test-with-coverage: + $(GOTEST) $(GOTEST_OPT_WITH_COVERAGE) $(ALL_PKGS) + +.PHONY: fmt +fmt: + @FMTOUT=`$(GOFMT) -s -l $(ALL_SRC) 2>&1`; \ + if [ "$$FMTOUT" ]; then \ + echo "$(GOFMT) FAILED => gofmt the following files:\n"; \ + echo "$$FMTOUT\n"; \ + exit 1; \ + else \ + echo "Fmt finished successfully"; \ + fi + +.PHONY: lint +lint: + @LINTOUT=`$(GOLINT) $(ALL_PKGS) 2>&1`; \ + if [ "$$LINTOUT" ]; then \ + echo "$(GOLINT) FAILED => clean the following lint errors:\n"; \ + echo "$$LINTOUT\n"; \ + exit 1; \ + else \ + echo "Lint finished successfully"; \ + fi + +.PHONY: vet +vet: + # TODO: Understand why go vet downloads "github.com/google/go-cmp v0.2.0" + @VETOUT=`$(GOVET) ./... 
| grep -v "go: downloading" 2>&1`; \ + if [ "$$VETOUT" ]; then \ + echo "$(GOVET) FAILED => go vet the following files:\n"; \ + echo "$$VETOUT\n"; \ + exit 1; \ + else \ + echo "Vet finished successfully"; \ + fi + +.PHONY: embedmd +embedmd: + @EMBEDMDOUT=`$(EMBEDMD) -d $(README_FILES) 2>&1`; \ + if [ "$$EMBEDMDOUT" ]; then \ + echo "$(EMBEDMD) FAILED => embedmd the following files:\n"; \ + echo "$$EMBEDMDOUT\n"; \ + exit 1; \ + else \ + echo "Embedmd finished successfully"; \ + fi + +.PHONY: install-tools +install-tools: + go get -u golang.org/x/tools/cmd/cover + go get -u golang.org/x/lint/golint + go get -u github.com/rakyll/embedmd diff --git a/vendor/contrib.go.opencensus.io/exporter/prometheus/README.md b/vendor/contrib.go.opencensus.io/exporter/prometheus/README.md new file mode 100644 index 000000000..3a9c5d3c8 --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/prometheus/README.md @@ -0,0 +1,14 @@ +# OpenCensus Go Prometheus Exporter + +[![Build Status](https://travis-ci.org/census-ecosystem/opencensus-go-exporter-prometheus.svg?branch=master)](https://travis-ci.org/census-ecosystem/opencensus-go-exporter-prometheus) [![GoDoc][godoc-image]][godoc-url] + +Provides OpenCensus metrics export support for Prometheus. 
+ +## Installation + +``` +$ go get -u contrib.go.opencensus.io/exporter/prometheus +``` + +[godoc-image]: https://godoc.org/contrib.go.opencensus.io/exporter/prometheus?status.svg +[godoc-url]: https://godoc.org/contrib.go.opencensus.io/exporter/prometheus diff --git a/vendor/contrib.go.opencensus.io/exporter/prometheus/go.mod b/vendor/contrib.go.opencensus.io/exporter/prometheus/go.mod new file mode 100644 index 000000000..af78dfba2 --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/prometheus/go.mod @@ -0,0 +1,6 @@ +module contrib.go.opencensus.io/exporter/prometheus + +require ( + github.com/prometheus/client_golang v0.9.2 + go.opencensus.io v0.21.0 +) diff --git a/vendor/contrib.go.opencensus.io/exporter/prometheus/go.sum b/vendor/contrib.go.opencensus.io/exporter/prometheus/go.sum new file mode 100644 index 000000000..9eba12a55 --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/prometheus/go.sum @@ -0,0 +1,51 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= 
+github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto 
v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/go.opencensus.io/exporter/prometheus/prometheus.go b/vendor/contrib.go.opencensus.io/exporter/prometheus/prometheus.go similarity index 50% rename from vendor/go.opencensus.io/exporter/prometheus/prometheus.go rename to vendor/contrib.go.opencensus.io/exporter/prometheus/prometheus.go index 203bd38ad..59ce1c0a3 100644 --- a/vendor/go.opencensus.io/exporter/prometheus/prometheus.go +++ b/vendor/contrib.go.opencensus.io/exporter/prometheus/prometheus.go @@ -14,21 +14,20 @@ // Package prometheus contains a Prometheus exporter that supports exporting // OpenCensus views as Prometheus metrics. -package prometheus // import "go.opencensus.io/exporter/prometheus" +package prometheus // import "contrib.go.opencensus.io/exporter/prometheus" import ( - "bytes" "fmt" "log" "net/http" "sync" - "go.opencensus.io/internal" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" - + "context" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/metric/metricexport" + "go.opencensus.io/stats/view" ) // Exporter exports stats to Prometheus, users need @@ -61,39 +60,12 @@ func NewExporter(o Options) (*Exporter, error) { c: collector, handler: promhttp.HandlerFor(o.Registry, promhttp.HandlerOpts{}), } + collector.ensureRegisteredOnce() + return e, nil } var _ http.Handler = (*Exporter)(nil) -var _ view.Exporter = (*Exporter)(nil) - -func (c *collector) registerViews(views ...*view.View) { - count := 0 - for _, view := range views { - sig := viewSignature(c.opts.Namespace, view) - c.registeredViewsMu.Lock() - _, ok := c.registeredViews[sig] - c.registeredViewsMu.Unlock() 
- - if !ok { - desc := prometheus.NewDesc( - viewName(c.opts.Namespace, view), - view.Description, - tagKeysToLabels(view.TagKeys), - c.opts.ConstLabels, - ) - c.registeredViewsMu.Lock() - c.registeredViews[sig] = desc - c.registeredViewsMu.Unlock() - count++ - } - } - if count == 0 { - return - } - - c.ensureRegisteredOnce() -} // ensureRegisteredOnce invokes reg.Register on the collector itself // exactly once to ensure that we don't get errors such as @@ -123,11 +95,8 @@ func (o *Options) onError(err error) { // corresponding Prometheus Metric: SumData will be converted // to Untyped Metric, CountData will be a Counter Metric, // DistributionData will be a Histogram Metric. +// Deprecated in lieu of metricexport.Reader interface. func (e *Exporter) ExportView(vd *view.Data) { - if len(vd.Rows) == 0 { - return - } - e.c.addViewData(vd) } // ServeHTTP serves the Prometheus endpoint. @@ -145,151 +114,164 @@ type collector struct { // reg helps collector register views dynamically. reg *prometheus.Registry - // viewData are accumulated and atomically - // appended to on every Export invocation, from - // stats. These views are cleared out when - // Collect is invoked and the cycle is repeated. - viewData map[string]*view.Data - - registeredViewsMu sync.Mutex - // registeredViews maps a view to a prometheus desc. - registeredViews map[string]*prometheus.Desc -} - -func (c *collector) addViewData(vd *view.Data) { - c.registerViews(vd.View) - sig := viewSignature(c.opts.Namespace, vd.View) - - c.mu.Lock() - c.viewData[sig] = vd - c.mu.Unlock() + // reader reads metrics from all registered producers. 
+ reader *metricexport.Reader } func (c *collector) Describe(ch chan<- *prometheus.Desc) { - c.registeredViewsMu.Lock() - registered := make(map[string]*prometheus.Desc) - for k, desc := range c.registeredViews { - registered[k] = desc - } - c.registeredViewsMu.Unlock() - - for _, desc := range registered { - ch <- desc - } + de := &descExporter{c: c, descCh: ch} + c.reader.ReadAndExport(de) } // Collect fetches the statistics from OpenCensus // and delivers them as Prometheus Metrics. -// Collect is invoked everytime a prometheus.Gatherer is run +// Collect is invoked every time a prometheus.Gatherer is run // for example when the HTTP endpoint is invoked by Prometheus. func (c *collector) Collect(ch chan<- prometheus.Metric) { - // We need a copy of all the view data up until this point. - viewData := c.cloneViewData() - - for _, vd := range viewData { - sig := viewSignature(c.opts.Namespace, vd.View) - c.registeredViewsMu.Lock() - desc := c.registeredViews[sig] - c.registeredViewsMu.Unlock() - - for _, row := range vd.Rows { - metric, err := c.toMetric(desc, vd.View, row) - if err != nil { - c.opts.onError(err) - } else { - ch <- metric - } - } - } - + me := &metricExporter{c: c, metricCh: ch} + c.reader.ReadAndExport(me) } -func (c *collector) toMetric(desc *prometheus.Desc, v *view.View, row *view.Row) (prometheus.Metric, error) { - switch data := row.Data.(type) { - case *view.CountData: - return prometheus.NewConstMetric(desc, prometheus.CounterValue, float64(data.Value), tagValues(row.Tags, v.TagKeys)...) - - case *view.DistributionData: - points := make(map[float64]uint64) - // Histograms are cumulative in Prometheus. - // Get cumulative bucket counts. - cumCount := uint64(0) - for i, b := range v.Aggregation.Buckets { - cumCount += uint64(data.CountPerBucket[i]) - points[b] = cumCount - } - return prometheus.NewConstHistogram(desc, uint64(data.Count), data.Sum(), points, tagValues(row.Tags, v.TagKeys)...) 
+func newCollector(opts Options, registrar *prometheus.Registry) *collector { + return &collector{ + reg: registrar, + opts: opts, + reader: metricexport.NewReader()} +} - case *view.SumData: - return prometheus.NewConstMetric(desc, prometheus.UntypedValue, data.Value, tagValues(row.Tags, v.TagKeys)...) +func (c *collector) toDesc(metric *metricdata.Metric) *prometheus.Desc { + return prometheus.NewDesc( + metricName(c.opts.Namespace, metric), + metric.Descriptor.Description, + toPromLabels(metric.Descriptor.LabelKeys), + c.opts.ConstLabels) +} - case *view.LastValueData: - return prometheus.NewConstMetric(desc, prometheus.GaugeValue, data.Value, tagValues(row.Tags, v.TagKeys)...) +type metricExporter struct { + c *collector + metricCh chan<- prometheus.Metric +} - default: - return nil, fmt.Errorf("aggregation %T is not yet supported", v.Aggregation) +// ExportMetrics exports to the Prometheus. +// Each OpenCensus Metric will be converted to +// corresponding Prometheus Metric: +// TypeCumulativeInt64 and TypeCumulativeFloat64 will be a Counter Metric, +// TypeCumulativeDistribution will be a Histogram Metric. 
+// TypeGaugeFloat64 and TypeGaugeInt64 will be a Gauge Metric +func (me *metricExporter) ExportMetrics(ctx context.Context, metrics []*metricdata.Metric) error { + for _, metric := range metrics { + desc := me.c.toDesc(metric) + for _, ts := range metric.TimeSeries { + tvs := toLabelValues(ts.LabelValues) + for _, point := range ts.Points { + metric, err := toPromMetric(desc, metric, point, tvs) + if err != nil { + me.c.opts.onError(err) + } else if metric != nil { + me.metricCh <- metric + } + } + } } + return nil } -func tagKeysToLabels(keys []tag.Key) (labels []string) { - for _, key := range keys { - labels = append(labels, internal.Sanitize(key.Name())) - } - return labels +type descExporter struct { + c *collector + descCh chan<- *prometheus.Desc } -func newCollector(opts Options, registrar *prometheus.Registry) *collector { - return &collector{ - reg: registrar, - opts: opts, - registeredViews: make(map[string]*prometheus.Desc), - viewData: make(map[string]*view.Data), +// ExportMetrics exports descriptor to the Prometheus. +// It is invoked when request to scrape descriptors is received. +func (me *descExporter) ExportMetrics(ctx context.Context, metrics []*metricdata.Metric) error { + for _, metric := range metrics { + desc := me.c.toDesc(metric) + me.descCh <- desc } + return nil } -func tagValues(t []tag.Tag, expectedKeys []tag.Key) []string { - var values []string - // Add empty string for all missing keys in the tags map. 
- idx := 0 - for _, t := range t { - for t.Key != expectedKeys[idx] { - idx++ - values = append(values, "") - } - values = append(values, t.Value) - idx++ +func toPromLabels(mls []metricdata.LabelKey) (labels []string) { + for _, ml := range mls { + labels = append(labels, sanitize(ml.Key)) } - for idx < len(expectedKeys) { - idx++ - values = append(values, "") - } - return values + return labels } -func viewName(namespace string, v *view.View) string { +func metricName(namespace string, m *metricdata.Metric) string { var name string if namespace != "" { name = namespace + "_" } - return name + internal.Sanitize(v.Name) + return name + sanitize(m.Descriptor.Name) } -func viewSignature(namespace string, v *view.View) string { - var buf bytes.Buffer - buf.WriteString(viewName(namespace, v)) - for _, k := range v.TagKeys { - buf.WriteString("-" + k.Name()) +func toPromMetric( + desc *prometheus.Desc, + metric *metricdata.Metric, + point metricdata.Point, + labelValues []string) (prometheus.Metric, error) { + switch metric.Descriptor.Type { + case metricdata.TypeCumulativeFloat64, metricdata.TypeCumulativeInt64: + pv, err := toPromValue(point) + if err != nil { + return nil, err + } + return prometheus.NewConstMetric(desc, prometheus.CounterValue, pv, labelValues...) + + case metricdata.TypeGaugeFloat64, metricdata.TypeGaugeInt64: + pv, err := toPromValue(point) + if err != nil { + return nil, err + } + return prometheus.NewConstMetric(desc, prometheus.GaugeValue, pv, labelValues...) + + case metricdata.TypeCumulativeDistribution: + switch v := point.Value.(type) { + case *metricdata.Distribution: + points := make(map[float64]uint64) + // Histograms are cumulative in Prometheus. + // Get cumulative bucket counts. + cumCount := uint64(0) + for i, b := range v.BucketOptions.Bounds { + cumCount += uint64(v.Buckets[i].Count) + points[b] = cumCount + } + return prometheus.NewConstHistogram(desc, uint64(v.Count), v.Sum, points, labelValues...) 
+ default: + return nil, typeMismatchError(point) + } + case metricdata.TypeSummary: + // TODO: [rghetia] add support for TypeSummary. + return nil, nil + default: + return nil, fmt.Errorf("aggregation %T is not yet supported", metric.Descriptor.Type) } - return buf.String() } -func (c *collector) cloneViewData() map[string]*view.Data { - c.mu.Lock() - defer c.mu.Unlock() +func toLabelValues(labelValues []metricdata.LabelValue) (values []string) { + for _, lv := range labelValues { + if lv.Present { + values = append(values, lv.Value) + } else { + values = append(values, "") + } + } + return values +} + +func typeMismatchError(point metricdata.Point) error { + return fmt.Errorf("point type %T does not match metric type", point) - viewDataCopy := make(map[string]*view.Data) - for sig, viewData := range c.viewData { - viewDataCopy[sig] = viewData +} + +func toPromValue(point metricdata.Point) (float64, error) { + switch v := point.Value.(type) { + case float64: + return v, nil + case int64: + return float64(v), nil + default: + return 0.0, typeMismatchError(point) } - return viewDataCopy } diff --git a/vendor/contrib.go.opencensus.io/exporter/prometheus/sanitize.go b/vendor/contrib.go.opencensus.io/exporter/prometheus/sanitize.go new file mode 100644 index 000000000..ed6d8a14d --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/prometheus/sanitize.go @@ -0,0 +1,50 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "strings" + "unicode" +) + +const labelKeySizeLimit = 100 + +// sanitize returns a string that is trunacated to 100 characters if it's too +// long, and replaces non-alphanumeric characters to underscores. +func sanitize(s string) string { + if len(s) == 0 { + return s + } + if len(s) > labelKeySizeLimit { + s = s[:labelKeySizeLimit] + } + s = strings.Map(sanitizeRune, s) + if unicode.IsDigit(rune(s[0])) { + s = "key_" + s + } + if s[0] == '_' { + s = "key" + s + } + return s +} + +// converts anything that is not a letter or digit to an underscore +func sanitizeRune(r rune) rune { + if unicode.IsLetter(r) || unicode.IsDigit(r) { + return r + } + // Everything else turns into an underscore + return '_' +} diff --git a/vendor/github.com/golang/groupcache/LICENSE b/vendor/github.com/golang/groupcache/LICENSE new file mode 100644 index 000000000..37ec93a14 --- /dev/null +++ b/vendor/github.com/golang/groupcache/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. 
+ +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. 
For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. 
If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. 
You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/golang/groupcache/lru/lru.go b/vendor/github.com/golang/groupcache/lru/lru.go new file mode 100644 index 000000000..eac1c7664 --- /dev/null +++ b/vendor/github.com/golang/groupcache/lru/lru.go @@ -0,0 +1,133 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package lru implements an LRU cache. +package lru + +import "container/list" + +// Cache is an LRU cache. It is not safe for concurrent access. +type Cache struct { + // MaxEntries is the maximum number of cache entries before + // an item is evicted. Zero means no limit. + MaxEntries int + + // OnEvicted optionally specifies a callback function to be + // executed when an entry is purged from the cache. + OnEvicted func(key Key, value interface{}) + + ll *list.List + cache map[interface{}]*list.Element +} + +// A Key may be any value that is comparable. 
See http://golang.org/ref/spec#Comparison_operators +type Key interface{} + +type entry struct { + key Key + value interface{} +} + +// New creates a new Cache. +// If maxEntries is zero, the cache has no limit and it's assumed +// that eviction is done by the caller. +func New(maxEntries int) *Cache { + return &Cache{ + MaxEntries: maxEntries, + ll: list.New(), + cache: make(map[interface{}]*list.Element), + } +} + +// Add adds a value to the cache. +func (c *Cache) Add(key Key, value interface{}) { + if c.cache == nil { + c.cache = make(map[interface{}]*list.Element) + c.ll = list.New() + } + if ee, ok := c.cache[key]; ok { + c.ll.MoveToFront(ee) + ee.Value.(*entry).value = value + return + } + ele := c.ll.PushFront(&entry{key, value}) + c.cache[key] = ele + if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries { + c.RemoveOldest() + } +} + +// Get looks up a key's value from the cache. +func (c *Cache) Get(key Key) (value interface{}, ok bool) { + if c.cache == nil { + return + } + if ele, hit := c.cache[key]; hit { + c.ll.MoveToFront(ele) + return ele.Value.(*entry).value, true + } + return +} + +// Remove removes the provided key from the cache. +func (c *Cache) Remove(key Key) { + if c.cache == nil { + return + } + if ele, hit := c.cache[key]; hit { + c.removeElement(ele) + } +} + +// RemoveOldest removes the oldest item from the cache. +func (c *Cache) RemoveOldest() { + if c.cache == nil { + return + } + ele := c.ll.Back() + if ele != nil { + c.removeElement(ele) + } +} + +func (c *Cache) removeElement(e *list.Element) { + c.ll.Remove(e) + kv := e.Value.(*entry) + delete(c.cache, kv.key) + if c.OnEvicted != nil { + c.OnEvicted(kv.key, kv.value) + } +} + +// Len returns the number of items in the cache. +func (c *Cache) Len() int { + if c.cache == nil { + return 0 + } + return c.ll.Len() +} + +// Clear purges all stored items from the cache. 
+func (c *Cache) Clear() { + if c.OnEvicted != nil { + for _, e := range c.cache { + kv := e.Value.(*entry) + c.OnEvicted(kv.key, kv.value) + } + } + c.ll = nil + c.cache = nil +} diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go new file mode 100644 index 000000000..41bbddc61 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go @@ -0,0 +1,89 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package cmpopts provides common options for the cmp package. +package cmpopts + +import ( + "math" + "reflect" + + "github.com/google/go-cmp/cmp" +) + +func equateAlways(_, _ interface{}) bool { return true } + +// EquateEmpty returns a Comparer option that determines all maps and slices +// with a length of zero to be equal, regardless of whether they are nil. +// +// EquateEmpty can be used in conjunction with SortSlices and SortMaps. +func EquateEmpty() cmp.Option { + return cmp.FilterValues(isEmpty, cmp.Comparer(equateAlways)) +} + +func isEmpty(x, y interface{}) bool { + vx, vy := reflect.ValueOf(x), reflect.ValueOf(y) + return (x != nil && y != nil && vx.Type() == vy.Type()) && + (vx.Kind() == reflect.Slice || vx.Kind() == reflect.Map) && + (vx.Len() == 0 && vy.Len() == 0) +} + +// EquateApprox returns a Comparer option that determines float32 or float64 +// values to be equal if they are within a relative fraction or absolute margin. +// This option is not used when either x or y is NaN or infinite. +// +// The fraction determines that the difference of two values must be within the +// smaller fraction of the two values, while the margin determines that the two +// values must be within some absolute margin. +// To express only a fraction or only a margin, use 0 for the other parameter. +// The fraction and margin must be non-negative. 
+// +// The mathematical expression used is equivalent to: +// |x-y| ≤ max(fraction*min(|x|, |y|), margin) +// +// EquateApprox can be used in conjunction with EquateNaNs. +func EquateApprox(fraction, margin float64) cmp.Option { + if margin < 0 || fraction < 0 || math.IsNaN(margin) || math.IsNaN(fraction) { + panic("margin or fraction must be a non-negative number") + } + a := approximator{fraction, margin} + return cmp.Options{ + cmp.FilterValues(areRealF64s, cmp.Comparer(a.compareF64)), + cmp.FilterValues(areRealF32s, cmp.Comparer(a.compareF32)), + } +} + +type approximator struct{ frac, marg float64 } + +func areRealF64s(x, y float64) bool { + return !math.IsNaN(x) && !math.IsNaN(y) && !math.IsInf(x, 0) && !math.IsInf(y, 0) +} +func areRealF32s(x, y float32) bool { + return areRealF64s(float64(x), float64(y)) +} +func (a approximator) compareF64(x, y float64) bool { + relMarg := a.frac * math.Min(math.Abs(x), math.Abs(y)) + return math.Abs(x-y) <= math.Max(a.marg, relMarg) +} +func (a approximator) compareF32(x, y float32) bool { + return a.compareF64(float64(x), float64(y)) +} + +// EquateNaNs returns a Comparer option that determines float32 and float64 +// NaN values to be equal. +// +// EquateNaNs can be used in conjunction with EquateApprox. +func EquateNaNs() cmp.Option { + return cmp.Options{ + cmp.FilterValues(areNaNsF64s, cmp.Comparer(equateAlways)), + cmp.FilterValues(areNaNsF32s, cmp.Comparer(equateAlways)), + } +} + +func areNaNsF64s(x, y float64) bool { + return math.IsNaN(x) && math.IsNaN(y) +} +func areNaNsF32s(x, y float32) bool { + return areNaNsF64s(float64(x), float64(y)) +} diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go new file mode 100644 index 000000000..ff8e785d4 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go @@ -0,0 +1,207 @@ +// Copyright 2017, The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmpopts + +import ( + "fmt" + "reflect" + "unicode" + "unicode/utf8" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/internal/function" +) + +// IgnoreFields returns an Option that ignores exported fields of the +// given names on a single struct type. +// The struct type is specified by passing in a value of that type. +// +// The name may be a dot-delimited string (e.g., "Foo.Bar") to ignore a +// specific sub-field that is embedded or nested within the parent struct. +// +// This does not handle unexported fields; use IgnoreUnexported instead. +func IgnoreFields(typ interface{}, names ...string) cmp.Option { + sf := newStructFilter(typ, names...) + return cmp.FilterPath(sf.filter, cmp.Ignore()) +} + +// IgnoreTypes returns an Option that ignores all values assignable to +// certain types, which are specified by passing in a value of each type. +func IgnoreTypes(typs ...interface{}) cmp.Option { + tf := newTypeFilter(typs...) + return cmp.FilterPath(tf.filter, cmp.Ignore()) +} + +type typeFilter []reflect.Type + +func newTypeFilter(typs ...interface{}) (tf typeFilter) { + for _, typ := range typs { + t := reflect.TypeOf(typ) + if t == nil { + // This occurs if someone tries to pass in sync.Locker(nil) + panic("cannot determine type; consider using IgnoreInterfaces") + } + tf = append(tf, t) + } + return tf +} +func (tf typeFilter) filter(p cmp.Path) bool { + if len(p) < 1 { + return false + } + t := p.Last().Type() + for _, ti := range tf { + if t.AssignableTo(ti) { + return true + } + } + return false +} + +// IgnoreInterfaces returns an Option that ignores all values or references of +// values assignable to certain interface types. These interfaces are specified +// by passing in an anonymous struct with the interface types embedded in it. +// For example, to ignore sync.Locker, pass in struct{sync.Locker}{}. 
+func IgnoreInterfaces(ifaces interface{}) cmp.Option { + tf := newIfaceFilter(ifaces) + return cmp.FilterPath(tf.filter, cmp.Ignore()) +} + +type ifaceFilter []reflect.Type + +func newIfaceFilter(ifaces interface{}) (tf ifaceFilter) { + t := reflect.TypeOf(ifaces) + if ifaces == nil || t.Name() != "" || t.Kind() != reflect.Struct { + panic("input must be an anonymous struct") + } + for i := 0; i < t.NumField(); i++ { + fi := t.Field(i) + switch { + case !fi.Anonymous: + panic("struct cannot have named fields") + case fi.Type.Kind() != reflect.Interface: + panic("embedded field must be an interface type") + case fi.Type.NumMethod() == 0: + // This matches everything; why would you ever want this? + panic("cannot ignore empty interface") + default: + tf = append(tf, fi.Type) + } + } + return tf +} +func (tf ifaceFilter) filter(p cmp.Path) bool { + if len(p) < 1 { + return false + } + t := p.Last().Type() + for _, ti := range tf { + if t.AssignableTo(ti) { + return true + } + if t.Kind() != reflect.Ptr && reflect.PtrTo(t).AssignableTo(ti) { + return true + } + } + return false +} + +// IgnoreUnexported returns an Option that only ignores the immediate unexported +// fields of a struct, including anonymous fields of unexported types. +// In particular, unexported fields within the struct's exported fields +// of struct types, including anonymous fields, will not be ignored unless the +// type of the field itself is also passed to IgnoreUnexported. +// +// Avoid ignoring unexported fields of a type which you do not control (i.e. a +// type from another repository), as changes to the implementation of such types +// may change how the comparison behaves. Prefer a custom Comparer instead. +func IgnoreUnexported(typs ...interface{}) cmp.Option { + ux := newUnexportedFilter(typs...) 
+ return cmp.FilterPath(ux.filter, cmp.Ignore()) +} + +type unexportedFilter struct{ m map[reflect.Type]bool } + +func newUnexportedFilter(typs ...interface{}) unexportedFilter { + ux := unexportedFilter{m: make(map[reflect.Type]bool)} + for _, typ := range typs { + t := reflect.TypeOf(typ) + if t == nil || t.Kind() != reflect.Struct { + panic(fmt.Sprintf("invalid struct type: %T", typ)) + } + ux.m[t] = true + } + return ux +} +func (xf unexportedFilter) filter(p cmp.Path) bool { + sf, ok := p.Index(-1).(cmp.StructField) + if !ok { + return false + } + return xf.m[p.Index(-2).Type()] && !isExported(sf.Name()) +} + +// isExported reports whether the identifier is exported. +func isExported(id string) bool { + r, _ := utf8.DecodeRuneInString(id) + return unicode.IsUpper(r) +} + +// IgnoreSliceElements returns an Option that ignores elements of []V. +// The discard function must be of the form "func(T) bool" which is used to +// ignore slice elements of type V, where V is assignable to T. +// Elements are ignored if the function reports true. +func IgnoreSliceElements(discardFunc interface{}) cmp.Option { + vf := reflect.ValueOf(discardFunc) + if !function.IsType(vf.Type(), function.ValuePredicate) || vf.IsNil() { + panic(fmt.Sprintf("invalid discard function: %T", discardFunc)) + } + return cmp.FilterPath(func(p cmp.Path) bool { + si, ok := p.Index(-1).(cmp.SliceIndex) + if !ok { + return false + } + if !si.Type().AssignableTo(vf.Type().In(0)) { + return false + } + vx, vy := si.Values() + if vx.IsValid() && vf.Call([]reflect.Value{vx})[0].Bool() { + return true + } + if vy.IsValid() && vf.Call([]reflect.Value{vy})[0].Bool() { + return true + } + return false + }, cmp.Ignore()) +} + +// IgnoreMapEntries returns an Option that ignores entries of map[K]V. +// The discard function must be of the form "func(T, R) bool" which is used to +// ignore map entries of type K and V, where K and V are assignable to T and R. +// Entries are ignored if the function reports true. 
+func IgnoreMapEntries(discardFunc interface{}) cmp.Option { + vf := reflect.ValueOf(discardFunc) + if !function.IsType(vf.Type(), function.KeyValuePredicate) || vf.IsNil() { + panic(fmt.Sprintf("invalid discard function: %T", discardFunc)) + } + return cmp.FilterPath(func(p cmp.Path) bool { + mi, ok := p.Index(-1).(cmp.MapIndex) + if !ok { + return false + } + if !mi.Key().Type().AssignableTo(vf.Type().In(0)) || !mi.Type().AssignableTo(vf.Type().In(1)) { + return false + } + k := mi.Key() + vx, vy := mi.Values() + if vx.IsValid() && vf.Call([]reflect.Value{k, vx})[0].Bool() { + return true + } + if vy.IsValid() && vf.Call([]reflect.Value{k, vy})[0].Bool() { + return true + } + return false + }, cmp.Ignore()) +} diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go new file mode 100644 index 000000000..3a4804621 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go @@ -0,0 +1,147 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmpopts + +import ( + "fmt" + "reflect" + "sort" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/internal/function" +) + +// SortSlices returns a Transformer option that sorts all []V. +// The less function must be of the form "func(T, T) bool" which is used to +// sort any slice with element type V that is assignable to T. +// +// The less function must be: +// • Deterministic: less(x, y) == less(x, y) +// • Irreflexive: !less(x, x) +// • Transitive: if !less(x, y) and !less(y, z), then !less(x, z) +// +// The less function does not have to be "total". That is, if !less(x, y) and +// !less(y, x) for two elements x and y, their relative order is maintained. +// +// SortSlices can be used in conjunction with EquateEmpty. 
+func SortSlices(lessFunc interface{}) cmp.Option { + vf := reflect.ValueOf(lessFunc) + if !function.IsType(vf.Type(), function.Less) || vf.IsNil() { + panic(fmt.Sprintf("invalid less function: %T", lessFunc)) + } + ss := sliceSorter{vf.Type().In(0), vf} + return cmp.FilterValues(ss.filter, cmp.Transformer("cmpopts.SortSlices", ss.sort)) +} + +type sliceSorter struct { + in reflect.Type // T + fnc reflect.Value // func(T, T) bool +} + +func (ss sliceSorter) filter(x, y interface{}) bool { + vx, vy := reflect.ValueOf(x), reflect.ValueOf(y) + if !(x != nil && y != nil && vx.Type() == vy.Type()) || + !(vx.Kind() == reflect.Slice && vx.Type().Elem().AssignableTo(ss.in)) || + (vx.Len() <= 1 && vy.Len() <= 1) { + return false + } + // Check whether the slices are already sorted to avoid an infinite + // recursion cycle applying the same transform to itself. + ok1 := sort.SliceIsSorted(x, func(i, j int) bool { return ss.less(vx, i, j) }) + ok2 := sort.SliceIsSorted(y, func(i, j int) bool { return ss.less(vy, i, j) }) + return !ok1 || !ok2 +} +func (ss sliceSorter) sort(x interface{}) interface{} { + src := reflect.ValueOf(x) + dst := reflect.MakeSlice(src.Type(), src.Len(), src.Len()) + for i := 0; i < src.Len(); i++ { + dst.Index(i).Set(src.Index(i)) + } + sort.SliceStable(dst.Interface(), func(i, j int) bool { return ss.less(dst, i, j) }) + ss.checkSort(dst) + return dst.Interface() +} +func (ss sliceSorter) checkSort(v reflect.Value) { + start := -1 // Start of a sequence of equal elements. + for i := 1; i < v.Len(); i++ { + if ss.less(v, i-1, i) { + // Check that first and last elements in v[start:i] are equal. 
+ if start >= 0 && (ss.less(v, start, i-1) || ss.less(v, i-1, start)) { + panic(fmt.Sprintf("incomparable values detected: want equal elements: %v", v.Slice(start, i))) + } + start = -1 + } else if start == -1 { + start = i + } + } +} +func (ss sliceSorter) less(v reflect.Value, i, j int) bool { + vx, vy := v.Index(i), v.Index(j) + return ss.fnc.Call([]reflect.Value{vx, vy})[0].Bool() +} + +// SortMaps returns a Transformer option that flattens map[K]V types to be a +// sorted []struct{K, V}. The less function must be of the form +// "func(T, T) bool" which is used to sort any map with key K that is +// assignable to T. +// +// Flattening the map into a slice has the property that cmp.Equal is able to +// use Comparers on K or the K.Equal method if it exists. +// +// The less function must be: +// • Deterministic: less(x, y) == less(x, y) +// • Irreflexive: !less(x, x) +// • Transitive: if !less(x, y) and !less(y, z), then !less(x, z) +// • Total: if x != y, then either less(x, y) or less(y, x) +// +// SortMaps can be used in conjunction with EquateEmpty. 
+func SortMaps(lessFunc interface{}) cmp.Option { + vf := reflect.ValueOf(lessFunc) + if !function.IsType(vf.Type(), function.Less) || vf.IsNil() { + panic(fmt.Sprintf("invalid less function: %T", lessFunc)) + } + ms := mapSorter{vf.Type().In(0), vf} + return cmp.FilterValues(ms.filter, cmp.Transformer("cmpopts.SortMaps", ms.sort)) +} + +type mapSorter struct { + in reflect.Type // T + fnc reflect.Value // func(T, T) bool +} + +func (ms mapSorter) filter(x, y interface{}) bool { + vx, vy := reflect.ValueOf(x), reflect.ValueOf(y) + return (x != nil && y != nil && vx.Type() == vy.Type()) && + (vx.Kind() == reflect.Map && vx.Type().Key().AssignableTo(ms.in)) && + (vx.Len() != 0 || vy.Len() != 0) +} +func (ms mapSorter) sort(x interface{}) interface{} { + src := reflect.ValueOf(x) + outType := reflect.StructOf([]reflect.StructField{ + {Name: "K", Type: src.Type().Key()}, + {Name: "V", Type: src.Type().Elem()}, + }) + dst := reflect.MakeSlice(reflect.SliceOf(outType), src.Len(), src.Len()) + for i, k := range src.MapKeys() { + v := reflect.New(outType).Elem() + v.Field(0).Set(k) + v.Field(1).Set(src.MapIndex(k)) + dst.Index(i).Set(v) + } + sort.Slice(dst.Interface(), func(i, j int) bool { return ms.less(dst, i, j) }) + ms.checkSort(dst) + return dst.Interface() +} +func (ms mapSorter) checkSort(v reflect.Value) { + for i := 1; i < v.Len(); i++ { + if !ms.less(v, i-1, i) { + panic(fmt.Sprintf("partial order detected: want %v < %v", v.Index(i-1), v.Index(i))) + } + } +} +func (ms mapSorter) less(v reflect.Value, i, j int) bool { + vx, vy := v.Index(i).Field(0), v.Index(j).Field(0) + return ms.fnc.Call([]reflect.Value{vx, vy})[0].Bool() +} diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/struct_filter.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/struct_filter.go new file mode 100644 index 000000000..97f707983 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/struct_filter.go @@ -0,0 +1,182 @@ +// Copyright 2017, The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmpopts + +import ( + "fmt" + "reflect" + "strings" + + "github.com/google/go-cmp/cmp" +) + +// filterField returns a new Option where opt is only evaluated on paths that +// include a specific exported field on a single struct type. +// The struct type is specified by passing in a value of that type. +// +// The name may be a dot-delimited string (e.g., "Foo.Bar") to select a +// specific sub-field that is embedded or nested within the parent struct. +func filterField(typ interface{}, name string, opt cmp.Option) cmp.Option { + // TODO: This is currently unexported over concerns of how helper filters + // can be composed together easily. + // TODO: Add tests for FilterField. + + sf := newStructFilter(typ, name) + return cmp.FilterPath(sf.filter, opt) +} + +type structFilter struct { + t reflect.Type // The root struct type to match on + ft fieldTree // Tree of fields to match on +} + +func newStructFilter(typ interface{}, names ...string) structFilter { + // TODO: Perhaps allow * as a special identifier to allow ignoring any + // number of path steps until the next field match? + // This could be useful when a concrete struct gets transformed into + // an anonymous struct where it is not possible to specify that by type, + // but the transformer happens to provide guarantees about the names of + // the transformed fields. 
+ + t := reflect.TypeOf(typ) + if t == nil || t.Kind() != reflect.Struct { + panic(fmt.Sprintf("%T must be a struct", typ)) + } + var ft fieldTree + for _, name := range names { + cname, err := canonicalName(t, name) + if err != nil { + panic(fmt.Sprintf("%s: %v", strings.Join(cname, "."), err)) + } + ft.insert(cname) + } + return structFilter{t, ft} +} + +func (sf structFilter) filter(p cmp.Path) bool { + for i, ps := range p { + if ps.Type().AssignableTo(sf.t) && sf.ft.matchPrefix(p[i+1:]) { + return true + } + } + return false +} + +// fieldTree represents a set of dot-separated identifiers. +// +// For example, inserting the following selectors: +// Foo +// Foo.Bar.Baz +// Foo.Buzz +// Nuka.Cola.Quantum +// +// Results in a tree of the form: +// {sub: { +// "Foo": {ok: true, sub: { +// "Bar": {sub: { +// "Baz": {ok: true}, +// }}, +// "Buzz": {ok: true}, +// }}, +// "Nuka": {sub: { +// "Cola": {sub: { +// "Quantum": {ok: true}, +// }}, +// }}, +// }} +type fieldTree struct { + ok bool // Whether this is a specified node + sub map[string]fieldTree // The sub-tree of fields under this node +} + +// insert inserts a sequence of field accesses into the tree. +func (ft *fieldTree) insert(cname []string) { + if ft.sub == nil { + ft.sub = make(map[string]fieldTree) + } + if len(cname) == 0 { + ft.ok = true + return + } + sub := ft.sub[cname[0]] + sub.insert(cname[1:]) + ft.sub[cname[0]] = sub +} + +// matchPrefix reports whether any selector in the fieldTree matches +// the start of path p. +func (ft fieldTree) matchPrefix(p cmp.Path) bool { + for _, ps := range p { + switch ps := ps.(type) { + case cmp.StructField: + ft = ft.sub[ps.Name()] + if ft.ok { + return true + } + if len(ft.sub) == 0 { + return false + } + case cmp.Indirect: + default: + return false + } + } + return false +} + +// canonicalName returns a list of identifiers where any struct field access +// through an embedded field is expanded to include the names of the embedded +// types themselves. 
+// +// For example, suppose field "Foo" is not directly in the parent struct, +// but actually from an embedded struct of type "Bar". Then, the canonical name +// of "Foo" is actually "Bar.Foo". +// +// Suppose field "Foo" is not directly in the parent struct, but actually +// a field in two different embedded structs of types "Bar" and "Baz". +// Then the selector "Foo" causes a panic since it is ambiguous which one it +// refers to. The user must specify either "Bar.Foo" or "Baz.Foo". +func canonicalName(t reflect.Type, sel string) ([]string, error) { + var name string + sel = strings.TrimPrefix(sel, ".") + if sel == "" { + return nil, fmt.Errorf("name must not be empty") + } + if i := strings.IndexByte(sel, '.'); i < 0 { + name, sel = sel, "" + } else { + name, sel = sel[:i], sel[i:] + } + + // Type must be a struct or pointer to struct. + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() != reflect.Struct { + return nil, fmt.Errorf("%v must be a struct", t) + } + + // Find the canonical name for this current field name. + // If the field exists in an embedded struct, then it will be expanded. 
+ if !isExported(name) { + // Disallow unexported fields: + // * To discourage people from actually touching unexported fields + // * FieldByName is buggy (https://golang.org/issue/4876) + return []string{name}, fmt.Errorf("name must be exported") + } + sf, ok := t.FieldByName(name) + if !ok { + return []string{name}, fmt.Errorf("does not exist") + } + var ss []string + for i := range sf.Index { + ss = append(ss, t.FieldByIndex(sf.Index[:i+1]).Name) + } + if sel == "" { + return ss, nil + } + ssPost, err := canonicalName(sf.Type, sel) + return append(ss, ssPost...), err +} diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/xform.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/xform.go new file mode 100644 index 000000000..9d651553d --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/xform.go @@ -0,0 +1,35 @@ +// Copyright 2018, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmpopts + +import ( + "github.com/google/go-cmp/cmp" +) + +type xformFilter struct{ xform cmp.Option } + +func (xf xformFilter) filter(p cmp.Path) bool { + for _, ps := range p { + if t, ok := ps.(cmp.Transform); ok && t.Option() == xf.xform { + return false + } + } + return true +} + +// AcyclicTransformer returns a Transformer with a filter applied that ensures +// that the transformer cannot be recursively applied upon its own output. +// +// An example use case is a transformer that splits a string by lines: +// AcyclicTransformer("SplitLines", func(s string) []string{ +// return strings.Split(s, "\n") +// }) +// +// Had this been an unfiltered Transformer instead, this would result in an +// infinite cycle converting a string to []string to [][]string and so on. 
+func AcyclicTransformer(name string, xformFunc interface{}) cmp.Option { + xf := xformFilter{cmp.Transformer(name, xformFunc)} + return cmp.FilterPath(xf.filter, xf.xform) +} diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go index 7e215f220..2133562b0 100644 --- a/vendor/github.com/google/go-cmp/cmp/compare.go +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -29,26 +29,17 @@ package cmp import ( "fmt" "reflect" + "strings" "github.com/google/go-cmp/cmp/internal/diff" + "github.com/google/go-cmp/cmp/internal/flags" "github.com/google/go-cmp/cmp/internal/function" "github.com/google/go-cmp/cmp/internal/value" ) -// BUG(dsnet): Maps with keys containing NaN values cannot be properly compared due to -// the reflection package's inability to retrieve such entries. Equal will panic -// anytime it comes across a NaN key, but this behavior may change. -// -// See https://golang.org/issue/11104 for more details. - -var nothing = reflect.Value{} - // Equal reports whether x and y are equal by recursively applying the // following rules in the given order to x and y and all of their sub-values: // -// • If two values are not of the same type, then they are never equal -// and the overall result is false. -// // • Let S be the set of all Ignore, Transformer, and Comparer options that // remain after applying all path filters, value filters, and type filters. // If at least one Ignore exists in S, then the comparison is ignored. @@ -61,43 +52,79 @@ var nothing = reflect.Value{} // // • If the values have an Equal method of the form "(T) Equal(T) bool" or // "(T) Equal(I) bool" where T is assignable to I, then use the result of -// x.Equal(y) even if x or y is nil. -// Otherwise, no such method exists and evaluation proceeds to the next rule. +// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and +// evaluation proceeds to the next rule. 
// // • Lastly, try to compare x and y based on their basic kinds. // Simple kinds like booleans, integers, floats, complex numbers, strings, and // channels are compared using the equivalent of the == operator in Go. // Functions are only equal if they are both nil, otherwise they are unequal. -// Pointers are equal if the underlying values they point to are also equal. -// Interfaces are equal if their underlying concrete values are also equal. // -// Structs are equal if all of their fields are equal. If a struct contains -// unexported fields, Equal panics unless the AllowUnexported option is used or -// an Ignore option (e.g., cmpopts.IgnoreUnexported) ignores that field. +// Structs are equal if recursively calling Equal on all fields report equal. +// If a struct contains unexported fields, Equal panics unless an Ignore option +// (e.g., cmpopts.IgnoreUnexported) ignores that field or the AllowUnexported +// option explicitly permits comparing the unexported field. +// +// Slices are equal if they are both nil or both non-nil, where recursively +// calling Equal on all non-ignored slice or array elements report equal. +// Empty non-nil slices and nil slices are not equal; to equate empty slices, +// consider using cmpopts.EquateEmpty. // -// Arrays, slices, and maps are equal if they are both nil or both non-nil -// with the same length and the elements at each index or key are equal. -// Note that a non-nil empty slice and a nil slice are not equal. -// To equate empty slices and maps, consider using cmpopts.EquateEmpty. +// Maps are equal if they are both nil or both non-nil, where recursively +// calling Equal on all non-ignored map entries report equal. // Map keys are equal according to the == operator. // To use custom comparisons for map keys, consider using cmpopts.SortMaps. +// Empty non-nil maps and nil maps are not equal; to equate empty maps, +// consider using cmpopts.EquateEmpty. 
+// +// Pointers and interfaces are equal if they are both nil or both non-nil, +// where they have the same underlying concrete type and recursively +// calling Equal on the underlying values reports equal. func Equal(x, y interface{}, opts ...Option) bool { + vx := reflect.ValueOf(x) + vy := reflect.ValueOf(y) + + // If the inputs are different types, auto-wrap them in an empty interface + // so that they have the same parent type. + var t reflect.Type + if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() { + t = reflect.TypeOf((*interface{})(nil)).Elem() + if vx.IsValid() { + vvx := reflect.New(t).Elem() + vvx.Set(vx) + vx = vvx + } + if vy.IsValid() { + vvy := reflect.New(t).Elem() + vvy.Set(vy) + vy = vvy + } + } else { + t = vx.Type() + } + s := newState(opts) - s.compareAny(reflect.ValueOf(x), reflect.ValueOf(y)) + s.compareAny(&pathStep{t, vx, vy}) return s.result.Equal() } // Diff returns a human-readable report of the differences between two values. // It returns an empty string if and only if Equal returns true for the same -// input values and options. The output string will use the "-" symbol to -// indicate elements removed from x, and the "+" symbol to indicate elements -// added to y. +// input values and options. +// +// The output is displayed as a literal in pseudo-Go syntax. +// At the start of each line, a "-" prefix indicates an element removed from x, +// a "+" prefix to indicates an element added to y, and the lack of a prefix +// indicates an element common to both x and y. If possible, the output +// uses fmt.Stringer.String or error.Error methods to produce more humanly +// readable outputs. In such cases, the string is prefixed with either an +// 's' or 'e' character, respectively, to indicate that the method was called. // -// Do not depend on this output being stable. +// Do not depend on this output being stable. If you need the ability to +// programmatically interpret the difference, consider using a custom Reporter. 
func Diff(x, y interface{}, opts ...Option) string { r := new(defaultReporter) - opts = Options{Options(opts), r} - eq := Equal(x, y, opts...) + eq := Equal(x, y, Options(opts), Reporter(r)) d := r.String() if (d == "") != eq { panic("inconsistent difference and equality results") @@ -108,9 +135,13 @@ func Diff(x, y interface{}, opts ...Option) string { type state struct { // These fields represent the "comparison state". // Calling statelessCompare must not result in observable changes to these. - result diff.Result // The current result of comparison - curPath Path // The current path in the value tree - reporter reporter // Optional reporter used for difference formatting + result diff.Result // The current result of comparison + curPath Path // The current path in the value tree + reporters []reporter // Optional reporters + + // recChecker checks for infinite cycles applying the same set of + // transformers upon the output of itself. + recChecker recChecker // dynChecker triggers pseudo-random checks for option correctness. // It is safe for statelessCompare to mutate this value. @@ -122,10 +153,9 @@ type state struct { } func newState(opts []Option) *state { - s := new(state) - for _, opt := range opts { - s.processOption(opt) - } + // Always ensure a validator option exists to validate the inputs. + s := &state{opts: Options{validator{}}} + s.processOption(Options(opts)) return s } @@ -152,10 +182,7 @@ func (s *state) processOption(opt Option) { s.exporters[t] = true } case reporter: - if s.reporter != nil { - panic("difference reporter already registered") - } - s.reporter = opt + s.reporters = append(s.reporters, opt) default: panic(fmt.Sprintf("unknown option %T", opt)) } @@ -164,153 +191,88 @@ func (s *state) processOption(opt Option) { // statelessCompare compares two values and returns the result. // This function is stateless in that it does not alter the current result, // or output to any registered reporters. 
-func (s *state) statelessCompare(vx, vy reflect.Value) diff.Result { +func (s *state) statelessCompare(step PathStep) diff.Result { // We do not save and restore the curPath because all of the compareX // methods should properly push and pop from the path. // It is an implementation bug if the contents of curPath differs from // when calling this function to when returning from it. - oldResult, oldReporter := s.result, s.reporter + oldResult, oldReporters := s.result, s.reporters s.result = diff.Result{} // Reset result - s.reporter = nil // Remove reporter to avoid spurious printouts - s.compareAny(vx, vy) + s.reporters = nil // Remove reporters to avoid spurious printouts + s.compareAny(step) res := s.result - s.result, s.reporter = oldResult, oldReporter + s.result, s.reporters = oldResult, oldReporters return res } -func (s *state) compareAny(vx, vy reflect.Value) { - // TODO: Support cyclic data structures. - - // Rule 0: Differing types are never equal. - if !vx.IsValid() || !vy.IsValid() { - s.report(vx.IsValid() == vy.IsValid(), vx, vy) - return - } - if vx.Type() != vy.Type() { - s.report(false, vx, vy) // Possible for path to be empty - return - } - t := vx.Type() - if len(s.curPath) == 0 { - s.curPath.push(&pathStep{typ: t}) - defer s.curPath.pop() +func (s *state) compareAny(step PathStep) { + // Update the path stack. + s.curPath.push(step) + defer s.curPath.pop() + for _, r := range s.reporters { + r.PushStep(step) + defer r.PopStep() } - vx, vy = s.tryExporting(vx, vy) + s.recChecker.Check(s.curPath) + + // Obtain the current type and values. + t := step.Type() + vx, vy := step.Values() // Rule 1: Check whether an option applies on this node in the value tree. - if s.tryOptions(vx, vy, t) { + if s.tryOptions(t, vx, vy) { return } // Rule 2: Check whether the type has a valid Equal method. - if s.tryMethod(vx, vy, t) { + if s.tryMethod(t, vx, vy) { return } - // Rule 3: Recursively descend into each value's underlying kind. 
+ // Rule 3: Compare based on the underlying kind. switch t.Kind() { case reflect.Bool: - s.report(vx.Bool() == vy.Bool(), vx, vy) - return + s.report(vx.Bool() == vy.Bool(), 0) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - s.report(vx.Int() == vy.Int(), vx, vy) - return + s.report(vx.Int() == vy.Int(), 0) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - s.report(vx.Uint() == vy.Uint(), vx, vy) - return + s.report(vx.Uint() == vy.Uint(), 0) case reflect.Float32, reflect.Float64: - s.report(vx.Float() == vy.Float(), vx, vy) - return + s.report(vx.Float() == vy.Float(), 0) case reflect.Complex64, reflect.Complex128: - s.report(vx.Complex() == vy.Complex(), vx, vy) - return + s.report(vx.Complex() == vy.Complex(), 0) case reflect.String: - s.report(vx.String() == vy.String(), vx, vy) - return + s.report(vx.String() == vy.String(), 0) case reflect.Chan, reflect.UnsafePointer: - s.report(vx.Pointer() == vy.Pointer(), vx, vy) - return + s.report(vx.Pointer() == vy.Pointer(), 0) case reflect.Func: - s.report(vx.IsNil() && vy.IsNil(), vx, vy) - return + s.report(vx.IsNil() && vy.IsNil(), 0) + case reflect.Struct: + s.compareStruct(t, vx, vy) + case reflect.Slice, reflect.Array: + s.compareSlice(t, vx, vy) + case reflect.Map: + s.compareMap(t, vx, vy) case reflect.Ptr: - if vx.IsNil() || vy.IsNil() { - s.report(vx.IsNil() && vy.IsNil(), vx, vy) - return - } - s.curPath.push(&indirect{pathStep{t.Elem()}}) - defer s.curPath.pop() - s.compareAny(vx.Elem(), vy.Elem()) - return + s.comparePtr(t, vx, vy) case reflect.Interface: - if vx.IsNil() || vy.IsNil() { - s.report(vx.IsNil() && vy.IsNil(), vx, vy) - return - } - if vx.Elem().Type() != vy.Elem().Type() { - s.report(false, vx.Elem(), vy.Elem()) - return - } - s.curPath.push(&typeAssertion{pathStep{vx.Elem().Type()}}) - defer s.curPath.pop() - s.compareAny(vx.Elem(), vy.Elem()) - return - case reflect.Slice: - if vx.IsNil() || vy.IsNil() { - 
s.report(vx.IsNil() && vy.IsNil(), vx, vy) - return - } - fallthrough - case reflect.Array: - s.compareArray(vx, vy, t) - return - case reflect.Map: - s.compareMap(vx, vy, t) - return - case reflect.Struct: - s.compareStruct(vx, vy, t) - return + s.compareInterface(t, vx, vy) default: panic(fmt.Sprintf("%v kind not handled", t.Kind())) } } -func (s *state) tryExporting(vx, vy reflect.Value) (reflect.Value, reflect.Value) { - if sf, ok := s.curPath[len(s.curPath)-1].(*structField); ok && sf.unexported { - if sf.force { - // Use unsafe pointer arithmetic to get read-write access to an - // unexported field in the struct. - vx = unsafeRetrieveField(sf.pvx, sf.field) - vy = unsafeRetrieveField(sf.pvy, sf.field) - } else { - // We are not allowed to export the value, so invalidate them - // so that tryOptions can panic later if not explicitly ignored. - vx = nothing - vy = nothing - } - } - return vx, vy -} - -func (s *state) tryOptions(vx, vy reflect.Value, t reflect.Type) bool { - // If there were no FilterValues, we will not detect invalid inputs, - // so manually check for them and append invalid if necessary. - // We still evaluate the options since an ignore can override invalid. - opts := s.opts - if !vx.IsValid() || !vy.IsValid() { - opts = Options{opts, invalid{}} - } - +func (s *state) tryOptions(t reflect.Type, vx, vy reflect.Value) bool { // Evaluate all filters and apply the remaining options. - if opt := opts.filter(s, vx, vy, t); opt != nil { + if opt := s.opts.filter(s, t, vx, vy); opt != nil { opt.apply(s, vx, vy) return true } return false } -func (s *state) tryMethod(vx, vy reflect.Value, t reflect.Type) bool { +func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool { // Check if this type even has an Equal method. 
m, ok := t.MethodByName("Equal") if !ok || !function.IsType(m.Type, function.EqualAssignable) { @@ -318,11 +280,11 @@ func (s *state) tryMethod(vx, vy reflect.Value, t reflect.Type) bool { } eq := s.callTTBFunc(m.Func, vx, vy) - s.report(eq, vx, vy) + s.report(eq, reportByMethod) return true } -func (s *state) callTRFunc(f, v reflect.Value) reflect.Value { +func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value { v = sanitizeValue(v, f.Type().In(0)) if !s.dynChecker.Next() { return f.Call([]reflect.Value{v})[0] @@ -333,15 +295,15 @@ func (s *state) callTRFunc(f, v reflect.Value) reflect.Value { // unsafe mutations to the input. c := make(chan reflect.Value) go detectRaces(c, f, v) + got := <-c want := f.Call([]reflect.Value{v})[0] - if got := <-c; !s.statelessCompare(got, want).Equal() { + if step.vx, step.vy = got, want; !s.statelessCompare(step).Equal() { // To avoid false-positives with non-reflexive equality operations, // we sanity check whether a value is equal to itself. - if !s.statelessCompare(want, want).Equal() { + if step.vx, step.vy = want, want; !s.statelessCompare(step).Equal() { return want } - fn := getFuncName(f.Pointer()) - panic(fmt.Sprintf("non-deterministic function detected: %s", fn)) + panic(fmt.Sprintf("non-deterministic function detected: %s", function.NameOf(f))) } return want } @@ -359,10 +321,10 @@ func (s *state) callTTBFunc(f, x, y reflect.Value) bool { // unsafe mutations to the input. 
c := make(chan reflect.Value) go detectRaces(c, f, y, x) + got := <-c want := f.Call([]reflect.Value{x, y})[0].Bool() - if got := <-c; !got.IsValid() || got.Bool() != want { - fn := getFuncName(f.Pointer()) - panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", fn)) + if !got.IsValid() || got.Bool() != want { + panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", function.NameOf(f))) } return want } @@ -380,140 +342,241 @@ func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) { // assuming that T is assignable to R. // Otherwise, it returns the input value as is. func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value { - // TODO(dsnet): Remove this hacky workaround. - // See https://golang.org/issue/22143 - if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t { - return reflect.New(t).Elem() + // TODO(dsnet): Workaround for reflect bug (https://golang.org/issue/22143). + if !flags.AtLeastGo110 { + if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t { + return reflect.New(t).Elem() + } } return v } -func (s *state) compareArray(vx, vy reflect.Value, t reflect.Type) { - step := &sliceIndex{pathStep{t.Elem()}, 0, 0} - s.curPath.push(step) +func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { + var vax, vay reflect.Value // Addressable versions of vx and vy - // Compute an edit-script for slices vx and vy. 
- es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result { - step.xkey, step.ykey = ix, iy - return s.statelessCompare(vx.Index(ix), vy.Index(iy)) - }) + step := StructField{&structField{}} + for i := 0; i < t.NumField(); i++ { + step.typ = t.Field(i).Type + step.vx = vx.Field(i) + step.vy = vy.Field(i) + step.name = t.Field(i).Name + step.idx = i + step.unexported = !isExported(step.name) + if step.unexported { + if step.name == "_" { + continue + } + // Defer checking of unexported fields until later to give an + // Ignore a chance to ignore the field. + if !vax.IsValid() || !vay.IsValid() { + // For retrieveUnexportedField to work, the parent struct must + // be addressable. Create a new copy of the values if + // necessary to make them addressable. + vax = makeAddressable(vx) + vay = makeAddressable(vy) + } + step.mayForce = s.exporters[t] + step.pvx = vax + step.pvy = vay + step.field = t.Field(i) + } + s.compareAny(step) + } +} - // Report the entire slice as is if the arrays are of primitive kind, - // and the arrays are different enough. - isPrimitive := false - switch t.Elem().Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, - reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: - isPrimitive = true - } - if isPrimitive && es.Dist() > (vx.Len()+vy.Len())/4 { - s.curPath.pop() // Pop first since we are reporting the whole slice - s.report(false, vx, vy) +func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) { + isSlice := t.Kind() == reflect.Slice + if isSlice && (vx.IsNil() || vy.IsNil()) { + s.report(vx.IsNil() && vy.IsNil(), 0) return } - // Replay the edit-script. + // TODO: Support cyclic data structures. 
+ + step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}}} + withIndexes := func(ix, iy int) SliceIndex { + if ix >= 0 { + step.vx, step.xkey = vx.Index(ix), ix + } else { + step.vx, step.xkey = reflect.Value{}, -1 + } + if iy >= 0 { + step.vy, step.ykey = vy.Index(iy), iy + } else { + step.vy, step.ykey = reflect.Value{}, -1 + } + return step + } + + // Ignore options are able to ignore missing elements in a slice. + // However, detecting these reliably requires an optimal differencing + // algorithm, for which diff.Difference is not. + // + // Instead, we first iterate through both slices to detect which elements + // would be ignored if standing alone. The index of non-discarded elements + // are stored in a separate slice, which diffing is then performed on. + var indexesX, indexesY []int + var ignoredX, ignoredY []bool + for ix := 0; ix < vx.Len(); ix++ { + ignored := s.statelessCompare(withIndexes(ix, -1)).NumDiff == 0 + if !ignored { + indexesX = append(indexesX, ix) + } + ignoredX = append(ignoredX, ignored) + } + for iy := 0; iy < vy.Len(); iy++ { + ignored := s.statelessCompare(withIndexes(-1, iy)).NumDiff == 0 + if !ignored { + indexesY = append(indexesY, iy) + } + ignoredY = append(ignoredY, ignored) + } + + // Compute an edit-script for slices vx and vy (excluding ignored elements). + edits := diff.Difference(len(indexesX), len(indexesY), func(ix, iy int) diff.Result { + return s.statelessCompare(withIndexes(indexesX[ix], indexesY[iy])) + }) + + // Replay the ignore-scripts and the edit-script. 
var ix, iy int - for _, e := range es { + for ix < vx.Len() || iy < vy.Len() { + var e diff.EditType + switch { + case ix < len(ignoredX) && ignoredX[ix]: + e = diff.UniqueX + case iy < len(ignoredY) && ignoredY[iy]: + e = diff.UniqueY + default: + e, edits = edits[0], edits[1:] + } switch e { case diff.UniqueX: - step.xkey, step.ykey = ix, -1 - s.report(false, vx.Index(ix), nothing) + s.compareAny(withIndexes(ix, -1)) ix++ case diff.UniqueY: - step.xkey, step.ykey = -1, iy - s.report(false, nothing, vy.Index(iy)) + s.compareAny(withIndexes(-1, iy)) iy++ default: - step.xkey, step.ykey = ix, iy - if e == diff.Identity { - s.report(true, vx.Index(ix), vy.Index(iy)) - } else { - s.compareAny(vx.Index(ix), vy.Index(iy)) - } + s.compareAny(withIndexes(ix, iy)) ix++ iy++ } } - s.curPath.pop() - return } -func (s *state) compareMap(vx, vy reflect.Value, t reflect.Type) { +func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) { if vx.IsNil() || vy.IsNil() { - s.report(vx.IsNil() && vy.IsNil(), vx, vy) + s.report(vx.IsNil() && vy.IsNil(), 0) return } + // TODO: Support cyclic data structures. + // We combine and sort the two map keys so that we can perform the // comparisons in a deterministic order. - step := &mapIndex{pathStep: pathStep{t.Elem()}} - s.curPath.push(step) - defer s.curPath.pop() + step := MapIndex{&mapIndex{pathStep: pathStep{typ: t.Elem()}}} for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) { + step.vx = vx.MapIndex(k) + step.vy = vy.MapIndex(k) step.key = k - vvx := vx.MapIndex(k) - vvy := vy.MapIndex(k) - switch { - case vvx.IsValid() && vvy.IsValid(): - s.compareAny(vvx, vvy) - case vvx.IsValid() && !vvy.IsValid(): - s.report(false, vvx, nothing) - case !vvx.IsValid() && vvy.IsValid(): - s.report(false, nothing, vvy) - default: - // It is possible for both vvx and vvy to be invalid if the - // key contained a NaN value in it. There is no way in - // reflection to be able to retrieve these values. 
- // See https://golang.org/issue/11104 - panic(fmt.Sprintf("%#v has map key with NaNs", s.curPath)) + if !step.vx.IsValid() && !step.vy.IsValid() { + // It is possible for both vx and vy to be invalid if the + // key contained a NaN value in it. + // + // Even with the ability to retrieve NaN keys in Go 1.12, + // there still isn't a sensible way to compare the values since + // a NaN key may map to multiple unordered values. + // The most reasonable way to compare NaNs would be to compare the + // set of values. However, this is impossible to do efficiently + // since set equality is provably an O(n^2) operation given only + // an Equal function. If we had a Less function or Hash function, + // this could be done in O(n*log(n)) or O(n), respectively. + // + // Rather than adding complex logic to deal with NaNs, make it + // the user's responsibility to compare such obscure maps. + const help = "consider providing a Comparer to compare the map" + panic(fmt.Sprintf("%#v has map key with NaNs\n%s", s.curPath, help)) } + s.compareAny(step) } } -func (s *state) compareStruct(vx, vy reflect.Value, t reflect.Type) { - var vax, vay reflect.Value // Addressable versions of vx and vy +func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } - step := &structField{} - s.curPath.push(step) - defer s.curPath.pop() - for i := 0; i < t.NumField(); i++ { - vvx := vx.Field(i) - vvy := vy.Field(i) - step.typ = t.Field(i).Type - step.name = t.Field(i).Name - step.idx = i - step.unexported = !isExported(step.name) - if step.unexported { - // Defer checking of unexported fields until later to give an - // Ignore a chance to ignore the field. - if !vax.IsValid() || !vay.IsValid() { - // For unsafeRetrieveField to work, the parent struct must - // be addressable. Create a new copy of the values if - // necessary to make them addressable. 
- vax = makeAddressable(vx) - vay = makeAddressable(vy) - } - step.force = s.exporters[t] - step.pvx = vax - step.pvy = vay - step.field = t.Field(i) + // TODO: Support cyclic data structures. + + vx, vy = vx.Elem(), vy.Elem() + s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}}) +} + +func (s *state) compareInterface(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + vx, vy = vx.Elem(), vy.Elem() + if vx.Type() != vy.Type() { + s.report(false, 0) + return + } + s.compareAny(TypeAssertion{&typeAssertion{pathStep{vx.Type(), vx, vy}}}) +} + +func (s *state) report(eq bool, rf resultFlags) { + if rf&reportByIgnore == 0 { + if eq { + s.result.NumSame++ + rf |= reportEqual + } else { + s.result.NumDiff++ + rf |= reportUnequal } - s.compareAny(vvx, vvy) + } + for _, r := range s.reporters { + r.Report(Result{flags: rf}) } } -// report records the result of a single comparison. -// It also calls Report if any reporter is registered. -func (s *state) report(eq bool, vx, vy reflect.Value) { - if eq { - s.result.NSame++ - } else { - s.result.NDiff++ +// recChecker tracks the state needed to periodically perform checks that +// user provided transformers are not stuck in an infinitely recursive cycle. +type recChecker struct{ next int } + +// Check scans the Path for any recursive transformers and panics when any +// recursive transformers are detected. Note that the presence of a +// recursive Transformer does not necessarily imply an infinite cycle. +// As such, this check only activates after some minimal number of path steps. +func (rc *recChecker) Check(p Path) { + const minLen = 1 << 16 + if rc.next == 0 { + rc.next = minLen + } + if len(p) < rc.next { + return + } + rc.next <<= 1 + + // Check whether the same transformer has appeared at least twice. 
+ var ss []string + m := map[Option]int{} + for _, ps := range p { + if t, ok := ps.(Transform); ok { + t := t.Option() + if m[t] == 1 { // Transformer was used exactly once before + tf := t.(*transformer).fnc.Type() + ss = append(ss, fmt.Sprintf("%v: %v => %v", t, tf.In(0), tf.Out(0))) + } + m[t]++ + } } - if s.reporter != nil { - s.reporter.Report(vx, vy, eq, s.curPath) + if len(ss) > 0 { + const warning = "recursive set of Transformers detected" + const help = "consider using cmpopts.AcyclicTransformer" + set := strings.Join(ss, "\n\t") + panic(fmt.Sprintf("%s:\n\t%s\n%s", warning, set, help)) } } diff --git a/vendor/github.com/google/go-cmp/cmp/unsafe_panic.go b/vendor/github.com/google/go-cmp/cmp/export_panic.go similarity index 60% rename from vendor/github.com/google/go-cmp/cmp/unsafe_panic.go rename to vendor/github.com/google/go-cmp/cmp/export_panic.go index d1518eb3a..abc3a1c3e 100644 --- a/vendor/github.com/google/go-cmp/cmp/unsafe_panic.go +++ b/vendor/github.com/google/go-cmp/cmp/export_panic.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE.md file. 
-// +build purego appengine js +// +build purego package cmp @@ -10,6 +10,6 @@ import "reflect" const supportAllowUnexported = false -func unsafeRetrieveField(reflect.Value, reflect.StructField) reflect.Value { - panic("unsafeRetrieveField is not implemented") +func retrieveUnexportedField(reflect.Value, reflect.StructField) reflect.Value { + panic("retrieveUnexportedField is not implemented") } diff --git a/vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go similarity index 64% rename from vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go rename to vendor/github.com/google/go-cmp/cmp/export_unsafe.go index 579b65507..59d4ee91b 100644 --- a/vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go +++ b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE.md file. -// +build !purego,!appengine,!js +// +build !purego package cmp @@ -13,11 +13,11 @@ import ( const supportAllowUnexported = true -// unsafeRetrieveField uses unsafe to forcibly retrieve any field from a struct -// such that the value has read-write permissions. +// retrieveUnexportedField uses unsafe to forcibly retrieve any field from +// a struct such that the value has read-write permissions. // // The parent struct, v, must be addressable, while f must be a StructField // describing the field to retrieve. 
-func unsafeRetrieveField(v reflect.Value, f reflect.StructField) reflect.Value { +func retrieveUnexportedField(v reflect.Value, f reflect.StructField) reflect.Value { return reflect.NewAt(f.Type, unsafe.Pointer(v.UnsafeAddr()+f.Offset)).Elem() } diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go index 42afa4960..fe98dcc67 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE.md file. -// +build !debug +// +build !cmp_debug package diff diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go index fd9f7f177..597b6ae56 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE.md file. -// +build debug +// +build cmp_debug package diff @@ -14,7 +14,7 @@ import ( ) // The algorithm can be seen running in real-time by enabling debugging: -// go test -tags=debug -v +// go test -tags=cmp_debug -v // // Example output: // === RUN TestDifference/#34 diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go index 260befea2..3d2e42662 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go @@ -85,22 +85,31 @@ func (es EditScript) LenY() int { return len(es) - es.stats().NX } type EqualFunc func(ix int, iy int) Result // Result is the result of comparison. -// NSame is the number of sub-elements that are equal. 
-// NDiff is the number of sub-elements that are not equal. -type Result struct{ NSame, NDiff int } +// NumSame is the number of sub-elements that are equal. +// NumDiff is the number of sub-elements that are not equal. +type Result struct{ NumSame, NumDiff int } + +// BoolResult returns a Result that is either Equal or not Equal. +func BoolResult(b bool) Result { + if b { + return Result{NumSame: 1} // Equal, Similar + } else { + return Result{NumDiff: 2} // Not Equal, not Similar + } +} // Equal indicates whether the symbols are equal. Two symbols are equal -// if and only if NDiff == 0. If Equal, then they are also Similar. -func (r Result) Equal() bool { return r.NDiff == 0 } +// if and only if NumDiff == 0. If Equal, then they are also Similar. +func (r Result) Equal() bool { return r.NumDiff == 0 } // Similar indicates whether two symbols are similar and may be represented // by using the Modified type. As a special case, we consider binary comparisons // (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar. // -// The exact ratio of NSame to NDiff to determine similarity may change. +// The exact ratio of NumSame to NumDiff to determine similarity may change. func (r Result) Similar() bool { - // Use NSame+1 to offset NSame so that binary comparisons are similar. - return r.NSame+1 >= r.NDiff + // Use NumSame+1 to offset NumSame so that binary comparisons are similar. + return r.NumSame+1 >= r.NumDiff } // Difference reports whether two lists of lengths nx and ny are equal @@ -191,9 +200,9 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { // that two lists commonly differ because elements were added to the front // or end of the other list. // - // Running the tests with the "debug" build tag prints a visualization of - // the algorithm running in real-time. This is educational for understanding - // how the algorithm works. See debug_enable.go. 
+ // Running the tests with the "cmp_debug" build tag prints a visualization + // of the algorithm running in real-time. This is educational for + // understanding how the algorithm works. See debug_enable.go. f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es) for { // Forward search from the beginning. diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go new file mode 100644 index 000000000..a9e7fc0b5 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go @@ -0,0 +1,9 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package flags + +// Deterministic controls whether the output of Diff should be deterministic. +// This is only used for testing. +var Deterministic bool diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go new file mode 100644 index 000000000..01aed0a15 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go @@ -0,0 +1,10 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !go1.10 + +package flags + +// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. +const AtLeastGo110 = false diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go new file mode 100644 index 000000000..c0b667f58 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go @@ -0,0 +1,10 @@ +// Copyright 2019, The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build go1.10 + +package flags + +// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. +const AtLeastGo110 = true diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go index 4c35ff11e..ace1dbe86 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go @@ -2,25 +2,34 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE.md file. -// Package function identifies function types. +// Package function provides functionality for identifying function types. package function -import "reflect" +import ( + "reflect" + "regexp" + "runtime" + "strings" +) type funcType int const ( _ funcType = iota + tbFunc // func(T) bool ttbFunc // func(T, T) bool + trbFunc // func(T, R) bool tibFunc // func(T, I) bool trFunc // func(T) R - Equal = ttbFunc // func(T, T) bool - EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool - Transformer = trFunc // func(T) R - ValueFilter = ttbFunc // func(T, T) bool - Less = ttbFunc // func(T, T) bool + Equal = ttbFunc // func(T, T) bool + EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool + Transformer = trFunc // func(T) R + ValueFilter = ttbFunc // func(T, T) bool + Less = ttbFunc // func(T, T) bool + ValuePredicate = tbFunc // func(T) bool + KeyValuePredicate = trbFunc // func(T, R) bool ) var boolType = reflect.TypeOf(true) @@ -32,10 +41,18 @@ func IsType(t reflect.Type, ft funcType) bool { } ni, no := t.NumIn(), t.NumOut() switch ft { + case tbFunc: // func(T) bool + if ni == 1 && no == 1 && t.Out(0) == boolType { + return true + } case ttbFunc: // func(T, T) bool if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType { return true } + case 
trbFunc: // func(T, R) bool + if ni == 2 && no == 1 && t.Out(0) == boolType { + return true + } case tibFunc: // func(T, I) bool if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType { return true @@ -47,3 +64,36 @@ func IsType(t reflect.Type, ft funcType) bool { } return false } + +var lastIdentRx = regexp.MustCompile(`[_\p{L}][_\p{L}\p{N}]*$`) + +// NameOf returns the name of the function value. +func NameOf(v reflect.Value) string { + fnc := runtime.FuncForPC(v.Pointer()) + if fnc == nil { + return "" + } + fullName := fnc.Name() // e.g., "long/path/name/mypkg.(*MyType).(long/path/name/mypkg.myMethod)-fm" + + // Method closures have a "-fm" suffix. + fullName = strings.TrimSuffix(fullName, "-fm") + + var name string + for len(fullName) > 0 { + inParen := strings.HasSuffix(fullName, ")") + fullName = strings.TrimSuffix(fullName, ")") + + s := lastIdentRx.FindString(fullName) + if s == "" { + break + } + name = s + "." + name + fullName = strings.TrimSuffix(fullName, s) + + if i := strings.LastIndexByte(fullName, '('); inParen && i >= 0 { + fullName = fullName[:i] + } + fullName = strings.TrimSuffix(fullName, ".") + } + return strings.TrimSuffix(name, ".") +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/format.go b/vendor/github.com/google/go-cmp/cmp/internal/value/format.go deleted file mode 100644 index 657e50877..000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/format.go +++ /dev/null @@ -1,277 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -// Package value provides functionality for reflect.Value types. -package value - -import ( - "fmt" - "reflect" - "strconv" - "strings" - "unicode" -) - -var stringerIface = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() - -// Format formats the value v as a string. 
-// -// This is similar to fmt.Sprintf("%+v", v) except this: -// * Prints the type unless it can be elided -// * Avoids printing struct fields that are zero -// * Prints a nil-slice as being nil, not empty -// * Prints map entries in deterministic order -func Format(v reflect.Value, conf FormatConfig) string { - conf.printType = true - conf.followPointers = true - conf.realPointers = true - return formatAny(v, conf, nil) -} - -type FormatConfig struct { - UseStringer bool // Should the String method be used if available? - printType bool // Should we print the type before the value? - PrintPrimitiveType bool // Should we print the type of primitives? - followPointers bool // Should we recursively follow pointers? - realPointers bool // Should we print the real address of pointers? -} - -func formatAny(v reflect.Value, conf FormatConfig, visited map[uintptr]bool) string { - // TODO: Should this be a multi-line printout in certain situations? - - if !v.IsValid() { - return "" - } - if conf.UseStringer && v.Type().Implements(stringerIface) && v.CanInterface() { - if (v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface) && v.IsNil() { - return "" - } - - const stringerPrefix = "s" // Indicates that the String method was used - s := v.Interface().(fmt.Stringer).String() - return stringerPrefix + formatString(s) - } - - switch v.Kind() { - case reflect.Bool: - return formatPrimitive(v.Type(), v.Bool(), conf) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return formatPrimitive(v.Type(), v.Int(), conf) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - if v.Type().PkgPath() == "" || v.Kind() == reflect.Uintptr { - // Unnamed uints are usually bytes or words, so use hexadecimal. 
- return formatPrimitive(v.Type(), formatHex(v.Uint()), conf) - } - return formatPrimitive(v.Type(), v.Uint(), conf) - case reflect.Float32, reflect.Float64: - return formatPrimitive(v.Type(), v.Float(), conf) - case reflect.Complex64, reflect.Complex128: - return formatPrimitive(v.Type(), v.Complex(), conf) - case reflect.String: - return formatPrimitive(v.Type(), formatString(v.String()), conf) - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - return formatPointer(v, conf) - case reflect.Ptr: - if v.IsNil() { - if conf.printType { - return fmt.Sprintf("(%v)(nil)", v.Type()) - } - return "" - } - if visited[v.Pointer()] || !conf.followPointers { - return formatPointer(v, conf) - } - visited = insertPointer(visited, v.Pointer()) - return "&" + formatAny(v.Elem(), conf, visited) - case reflect.Interface: - if v.IsNil() { - if conf.printType { - return fmt.Sprintf("%v(nil)", v.Type()) - } - return "" - } - return formatAny(v.Elem(), conf, visited) - case reflect.Slice: - if v.IsNil() { - if conf.printType { - return fmt.Sprintf("%v(nil)", v.Type()) - } - return "" - } - if visited[v.Pointer()] { - return formatPointer(v, conf) - } - visited = insertPointer(visited, v.Pointer()) - fallthrough - case reflect.Array: - var ss []string - subConf := conf - subConf.printType = v.Type().Elem().Kind() == reflect.Interface - for i := 0; i < v.Len(); i++ { - s := formatAny(v.Index(i), subConf, visited) - ss = append(ss, s) - } - s := fmt.Sprintf("{%s}", strings.Join(ss, ", ")) - if conf.printType { - return v.Type().String() + s - } - return s - case reflect.Map: - if v.IsNil() { - if conf.printType { - return fmt.Sprintf("%v(nil)", v.Type()) - } - return "" - } - if visited[v.Pointer()] { - return formatPointer(v, conf) - } - visited = insertPointer(visited, v.Pointer()) - - var ss []string - keyConf, valConf := conf, conf - keyConf.printType = v.Type().Key().Kind() == reflect.Interface - keyConf.followPointers = false - valConf.printType = v.Type().Elem().Kind() == 
reflect.Interface - for _, k := range SortKeys(v.MapKeys()) { - sk := formatAny(k, keyConf, visited) - sv := formatAny(v.MapIndex(k), valConf, visited) - ss = append(ss, fmt.Sprintf("%s: %s", sk, sv)) - } - s := fmt.Sprintf("{%s}", strings.Join(ss, ", ")) - if conf.printType { - return v.Type().String() + s - } - return s - case reflect.Struct: - var ss []string - subConf := conf - subConf.printType = true - for i := 0; i < v.NumField(); i++ { - vv := v.Field(i) - if isZero(vv) { - continue // Elide zero value fields - } - name := v.Type().Field(i).Name - subConf.UseStringer = conf.UseStringer - s := formatAny(vv, subConf, visited) - ss = append(ss, fmt.Sprintf("%s: %s", name, s)) - } - s := fmt.Sprintf("{%s}", strings.Join(ss, ", ")) - if conf.printType { - return v.Type().String() + s - } - return s - default: - panic(fmt.Sprintf("%v kind not handled", v.Kind())) - } -} - -func formatString(s string) string { - // Use quoted string if it the same length as a raw string literal. - // Otherwise, attempt to use the raw string form. - qs := strconv.Quote(s) - if len(qs) == 1+len(s)+1 { - return qs - } - - // Disallow newlines to ensure output is a single line. - // Only allow printable runes for readability purposes. 
- rawInvalid := func(r rune) bool { - return r == '`' || r == '\n' || !unicode.IsPrint(r) - } - if strings.IndexFunc(s, rawInvalid) < 0 { - return "`" + s + "`" - } - return qs -} - -func formatPrimitive(t reflect.Type, v interface{}, conf FormatConfig) string { - if conf.printType && (conf.PrintPrimitiveType || t.PkgPath() != "") { - return fmt.Sprintf("%v(%v)", t, v) - } - return fmt.Sprintf("%v", v) -} - -func formatPointer(v reflect.Value, conf FormatConfig) string { - p := v.Pointer() - if !conf.realPointers { - p = 0 // For deterministic printing purposes - } - s := formatHex(uint64(p)) - if conf.printType { - return fmt.Sprintf("(%v)(%s)", v.Type(), s) - } - return s -} - -func formatHex(u uint64) string { - var f string - switch { - case u <= 0xff: - f = "0x%02x" - case u <= 0xffff: - f = "0x%04x" - case u <= 0xffffff: - f = "0x%06x" - case u <= 0xffffffff: - f = "0x%08x" - case u <= 0xffffffffff: - f = "0x%010x" - case u <= 0xffffffffffff: - f = "0x%012x" - case u <= 0xffffffffffffff: - f = "0x%014x" - case u <= 0xffffffffffffffff: - f = "0x%016x" - } - return fmt.Sprintf(f, u) -} - -// insertPointer insert p into m, allocating m if necessary. -func insertPointer(m map[uintptr]bool, p uintptr) map[uintptr]bool { - if m == nil { - m = make(map[uintptr]bool) - } - m[p] = true - return m -} - -// isZero reports whether v is the zero value. -// This does not rely on Interface and so can be used on unexported fields. 
-func isZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return v.Bool() == false - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Complex64, reflect.Complex128: - return v.Complex() == 0 - case reflect.String: - return v.String() == "" - case reflect.UnsafePointer: - return v.Pointer() == 0 - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - return v.IsNil() - case reflect.Array: - for i := 0; i < v.Len(); i++ { - if !isZero(v.Index(i)) { - return false - } - } - return true - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - if !isZero(v.Field(i)) { - return false - } - } - return true - } - return false -} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go new file mode 100644 index 000000000..0a01c4796 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go @@ -0,0 +1,23 @@ +// Copyright 2018, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build purego + +package value + +import "reflect" + +// Pointer is an opaque typed pointer and is guaranteed to be comparable. +type Pointer struct { + p uintptr + t reflect.Type +} + +// PointerOf returns a Pointer from v, which must be a +// reflect.Ptr, reflect.Slice, or reflect.Map. +func PointerOf(v reflect.Value) Pointer { + // NOTE: Storing a pointer as an uintptr is technically incorrect as it + // assumes that the GC implementation does not use a moving collector. 
+ return Pointer{v.Pointer(), v.Type()} +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go new file mode 100644 index 000000000..da134ae2a --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go @@ -0,0 +1,26 @@ +// Copyright 2018, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !purego + +package value + +import ( + "reflect" + "unsafe" +) + +// Pointer is an opaque typed pointer and is guaranteed to be comparable. +type Pointer struct { + p unsafe.Pointer + t reflect.Type +} + +// PointerOf returns a Pointer from v, which must be a +// reflect.Ptr, reflect.Slice, or reflect.Map. +func PointerOf(v reflect.Value) Pointer { + // The proper representation of a pointer is unsafe.Pointer, + // which is necessary if the GC ever uses a moving collector. + return Pointer{unsafe.Pointer(v.Pointer()), v.Type()} +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go index fe8aa27a0..24fbae6e3 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go @@ -19,7 +19,7 @@ func SortKeys(vs []reflect.Value) []reflect.Value { } // Sort the map keys. - sort.Sort(valueSorter(vs)) + sort.SliceStable(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) }) // Deduplicate keys (fails for NaNs). vs2 := vs[:1] @@ -31,13 +31,6 @@ func SortKeys(vs []reflect.Value) []reflect.Value { return vs2 } -// TODO: Use sort.Slice once Google AppEngine is on Go1.8 or above. 
-type valueSorter []reflect.Value - -func (vs valueSorter) Len() int { return len(vs) } -func (vs valueSorter) Less(i, j int) bool { return isLess(vs[i], vs[j]) } -func (vs valueSorter) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] } - // isLess is a generic function for sorting arbitrary map keys. // The inputs must be of the same type and must be comparable. func isLess(x, y reflect.Value) bool { @@ -49,6 +42,8 @@ func isLess(x, y reflect.Value) bool { case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: return x.Uint() < y.Uint() case reflect.Float32, reflect.Float64: + // NOTE: This does not sort -0 as less than +0 + // since Go maps treat -0 and +0 as equal keys. fx, fy := x.Float(), y.Float() return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy) case reflect.Complex64, reflect.Complex128: diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go new file mode 100644 index 000000000..06a8ffd03 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go @@ -0,0 +1,48 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package value + +import ( + "math" + "reflect" +) + +// IsZero reports whether v is the zero value. +// This does not rely on Interface and so can be used on unexported fields. 
+func IsZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return v.Bool() == false + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return math.Float64bits(v.Float()) == 0 + case reflect.Complex64, reflect.Complex128: + return math.Float64bits(real(v.Complex())) == 0 && math.Float64bits(imag(v.Complex())) == 0 + case reflect.String: + return v.String() == "" + case reflect.UnsafePointer: + return v.Pointer() == 0 + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + return v.IsNil() + case reflect.Array: + for i := 0; i < v.Len(); i++ { + if !IsZero(v.Index(i)) { + return false + } + } + return true + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + if !IsZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go index 91d4b066e..793448160 100644 --- a/vendor/github.com/google/go-cmp/cmp/options.go +++ b/vendor/github.com/google/go-cmp/cmp/options.go @@ -7,7 +7,7 @@ package cmp import ( "fmt" "reflect" - "runtime" + "regexp" "strings" "github.com/google/go-cmp/cmp/internal/function" @@ -29,11 +29,11 @@ type Option interface { // An Options is returned only if multiple comparers or transformers // can apply simultaneously and will only contain values of those types // or sub-Options containing values of those types. 
- filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption + filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption } // applicableOption represents the following types: -// Fundamental: ignore | invalid | *comparer | *transformer +// Fundamental: ignore | validator | *comparer | *transformer // Grouping: Options type applicableOption interface { Option @@ -43,7 +43,7 @@ type applicableOption interface { } // coreOption represents the following types: -// Fundamental: ignore | invalid | *comparer | *transformer +// Fundamental: ignore | validator | *comparer | *transformer // Filters: *pathFilter | *valuesFilter type coreOption interface { Option @@ -63,19 +63,19 @@ func (core) isCore() {} // on all individual options held within. type Options []Option -func (opts Options) filter(s *state, vx, vy reflect.Value, t reflect.Type) (out applicableOption) { +func (opts Options) filter(s *state, t reflect.Type, vx, vy reflect.Value) (out applicableOption) { for _, opt := range opts { - switch opt := opt.filter(s, vx, vy, t); opt.(type) { + switch opt := opt.filter(s, t, vx, vy); opt.(type) { case ignore: return ignore{} // Only ignore can short-circuit evaluation - case invalid: - out = invalid{} // Takes precedence over comparer or transformer + case validator: + out = validator{} // Takes precedence over comparer or transformer case *comparer, *transformer, Options: switch out.(type) { case nil: out = opt - case invalid: - // Keep invalid + case validator: + // Keep validator case *comparer, *transformer, Options: out = Options{out, opt} // Conflicting comparers or transformers } @@ -106,6 +106,11 @@ func (opts Options) String() string { // FilterPath returns a new Option where opt is only evaluated if filter f // returns true for the current Path in the value tree. // +// This filter is called even if a slice element or map entry is missing and +// provides an opportunity to ignore such cases. 
The filter function must be +// symmetric such that the filter result is identical regardless of whether the +// missing value is from x or y. +// // The option passed in may be an Ignore, Transformer, Comparer, Options, or // a previously filtered Option. func FilterPath(f func(Path) bool, opt Option) Option { @@ -124,22 +129,22 @@ type pathFilter struct { opt Option } -func (f pathFilter) filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption { +func (f pathFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption { if f.fnc(s.curPath) { - return f.opt.filter(s, vx, vy, t) + return f.opt.filter(s, t, vx, vy) } return nil } func (f pathFilter) String() string { - fn := getFuncName(reflect.ValueOf(f.fnc).Pointer()) - return fmt.Sprintf("FilterPath(%s, %v)", fn, f.opt) + return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt) } // FilterValues returns a new Option where opt is only evaluated if filter f, // which is a function of the form "func(T, T) bool", returns true for the -// current pair of values being compared. If the type of the values is not -// assignable to T, then this filter implicitly returns false. +// current pair of values being compared. If either value is invalid or +// the type of the values is not assignable to T, then this filter implicitly +// returns false. 
// // The filter function must be // symmetric (i.e., agnostic to the order of the inputs) and @@ -171,19 +176,18 @@ type valuesFilter struct { opt Option } -func (f valuesFilter) filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption { - if !vx.IsValid() || !vy.IsValid() { - return invalid{} +func (f valuesFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption { + if !vx.IsValid() || !vx.CanInterface() || !vy.IsValid() || !vy.CanInterface() { + return nil } if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) { - return f.opt.filter(s, vx, vy, t) + return f.opt.filter(s, t, vx, vy) } return nil } func (f valuesFilter) String() string { - fn := getFuncName(f.fnc.Pointer()) - return fmt.Sprintf("FilterValues(%s, %v)", fn, f.opt) + return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt) } // Ignore is an Option that causes all comparisons to be ignored. @@ -194,20 +198,45 @@ func Ignore() Option { return ignore{} } type ignore struct{ core } func (ignore) isFiltered() bool { return false } -func (ignore) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption { return ignore{} } -func (ignore) apply(_ *state, _, _ reflect.Value) { return } +func (ignore) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { return ignore{} } +func (ignore) apply(s *state, _, _ reflect.Value) { s.report(true, reportByIgnore) } func (ignore) String() string { return "Ignore()" } -// invalid is a sentinel Option type to indicate that some options could not -// be evaluated due to unexported fields. -type invalid struct{ core } +// validator is a sentinel Option type to indicate that some options could not +// be evaluated due to unexported fields, missing slice elements, or +// missing map entries. Both values are validator only for unexported fields. 
+type validator struct{ core } + +func (validator) filter(_ *state, _ reflect.Type, vx, vy reflect.Value) applicableOption { + if !vx.IsValid() || !vy.IsValid() { + return validator{} + } + if !vx.CanInterface() || !vy.CanInterface() { + return validator{} + } + return nil +} +func (validator) apply(s *state, vx, vy reflect.Value) { + // Implies missing slice element or map entry. + if !vx.IsValid() || !vy.IsValid() { + s.report(vx.IsValid() == vy.IsValid(), 0) + return + } + + // Unable to Interface implies unexported field without visibility access. + if !vx.CanInterface() || !vy.CanInterface() { + const help = "consider using a custom Comparer; if you control the implementation of type, you can also consider AllowUnexported or cmpopts.IgnoreUnexported" + panic(fmt.Sprintf("cannot handle unexported field: %#v\n%s", s.curPath, help)) + } -func (invalid) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption { return invalid{} } -func (invalid) apply(s *state, _, _ reflect.Value) { - const help = "consider using AllowUnexported or cmpopts.IgnoreUnexported" - panic(fmt.Sprintf("cannot handle unexported field: %#v\n%s", s.curPath, help)) + panic("not reachable") } +// identRx represents a valid identifier according to the Go specification. +const identRx = `[_\p{L}][_\p{L}\p{N}]*` + +var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`) + // Transformer returns an Option that applies a transformation function that // converts values of a certain type into that of another. // @@ -220,18 +249,25 @@ func (invalid) apply(s *state, _, _ reflect.Value) { // input and output types are the same), an implicit filter is added such that // a transformer is applicable only if that exact transformer is not already // in the tail of the Path since the last non-Transform step. 
+// For situations where the implicit filter is still insufficient, +// consider using cmpopts.AcyclicTransformer, which adds a filter +// to prevent the transformer from being recursively applied upon itself. // // The name is a user provided label that is used as the Transform.Name in the -// transformation PathStep. If empty, an arbitrary name is used. +// transformation PathStep (and eventually shown in the Diff output). +// The name must be a valid identifier or qualified identifier in Go syntax. +// If empty, an arbitrary name is used. func Transformer(name string, f interface{}) Option { v := reflect.ValueOf(f) if !function.IsType(v.Type(), function.Transformer) || v.IsNil() { panic(fmt.Sprintf("invalid transformer function: %T", f)) } if name == "" { - name = "λ" // Lambda-symbol as place-holder for anonymous transformer - } - if !isValid(name) { + name = function.NameOf(v) + if !identsRx.MatchString(name) { + name = "λ" // Lambda-symbol as placeholder name + } + } else if !identsRx.MatchString(name) { panic(fmt.Sprintf("invalid name: %q", name)) } tr := &transformer{name: name, fnc: reflect.ValueOf(f)} @@ -250,9 +286,9 @@ type transformer struct { func (tr *transformer) isFiltered() bool { return tr.typ != nil } -func (tr *transformer) filter(s *state, _, _ reflect.Value, t reflect.Type) applicableOption { +func (tr *transformer) filter(s *state, t reflect.Type, _, _ reflect.Value) applicableOption { for i := len(s.curPath) - 1; i >= 0; i-- { - if t, ok := s.curPath[i].(*transform); !ok { + if t, ok := s.curPath[i].(Transform); !ok { break // Hit most recent non-Transform step } else if tr == t.trans { return nil // Cannot directly use same Transform @@ -265,18 +301,15 @@ func (tr *transformer) filter(s *state, _, _ reflect.Value, t reflect.Type) appl } func (tr *transformer) apply(s *state, vx, vy reflect.Value) { - // Update path before calling the Transformer so that dynamic checks - // will use the updated path. 
- s.curPath.push(&transform{pathStep{tr.fnc.Type().Out(0)}, tr}) - defer s.curPath.pop() - - vx = s.callTRFunc(tr.fnc, vx) - vy = s.callTRFunc(tr.fnc, vy) - s.compareAny(vx, vy) + step := Transform{&transform{pathStep{typ: tr.fnc.Type().Out(0)}, tr}} + vvx := s.callTRFunc(tr.fnc, vx, step) + vvy := s.callTRFunc(tr.fnc, vy, step) + step.vx, step.vy = vvx, vvy + s.compareAny(step) } func (tr transformer) String() string { - return fmt.Sprintf("Transformer(%s, %s)", tr.name, getFuncName(tr.fnc.Pointer())) + return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc)) } // Comparer returns an Option that determines whether two values are equal @@ -311,7 +344,7 @@ type comparer struct { func (cm *comparer) isFiltered() bool { return cm.typ != nil } -func (cm *comparer) filter(_ *state, _, _ reflect.Value, t reflect.Type) applicableOption { +func (cm *comparer) filter(_ *state, t reflect.Type, _, _ reflect.Value) applicableOption { if cm.typ == nil || t.AssignableTo(cm.typ) { return cm } @@ -320,11 +353,11 @@ func (cm *comparer) filter(_ *state, _, _ reflect.Value, t reflect.Type) applica func (cm *comparer) apply(s *state, vx, vy reflect.Value) { eq := s.callTTBFunc(cm.fnc, vx, vy) - s.report(eq, vx, vy) + s.report(eq, reportByFunc) } func (cm comparer) String() string { - return fmt.Sprintf("Comparer(%s)", getFuncName(cm.fnc.Pointer())) + return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc)) } // AllowUnexported returns an Option that forcibly allows operations on @@ -338,7 +371,7 @@ func (cm comparer) String() string { // defined in an internal package where the semantic meaning of an unexported // field is in the control of the user. // -// For some cases, a custom Comparer should be used instead that defines +// In many cases, a custom Comparer should be used instead that defines // equality as a function of the public API of a type rather than the underlying // unexported implementation. 
// @@ -370,27 +403,92 @@ func AllowUnexported(types ...interface{}) Option { type visibleStructs map[reflect.Type]bool -func (visibleStructs) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption { +func (visibleStructs) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { panic("not implemented") } -// reporter is an Option that configures how differences are reported. -type reporter interface { - // TODO: Not exported yet. +// Result represents the comparison result for a single node and +// is provided by cmp when calling Result (see Reporter). +type Result struct { + _ [0]func() // Make Result incomparable + flags resultFlags +} + +// Equal reports whether the node was determined to be equal or not. +// As a special case, ignored nodes are considered equal. +func (r Result) Equal() bool { + return r.flags&(reportEqual|reportByIgnore) != 0 +} + +// ByIgnore reports whether the node is equal because it was ignored. +// This never reports true if Equal reports false. +func (r Result) ByIgnore() bool { + return r.flags&reportByIgnore != 0 +} + +// ByMethod reports whether the Equal method determined equality. +func (r Result) ByMethod() bool { + return r.flags&reportByMethod != 0 +} + +// ByFunc reports whether a Comparer function determined equality. +func (r Result) ByFunc() bool { + return r.flags&reportByFunc != 0 +} + +type resultFlags uint + +const ( + _ resultFlags = (1 << iota) / 2 + + reportEqual + reportUnequal + reportByIgnore + reportByMethod + reportByFunc +) + +// Reporter is an Option that can be passed to Equal. When Equal traverses +// the value trees, it calls PushStep as it descends into each node in the +// tree and PopStep as it ascend out of the node. The leaves of the tree are +// either compared (determined to be equal or not equal) or ignored and reported +// as such by calling the Report method. +func Reporter(r interface { + // PushStep is called when a tree-traversal operation is performed. 
+ // The PathStep itself is only valid until the step is popped. + // The PathStep.Values are valid for the duration of the entire traversal + // and must not be mutated. + // + // Equal always calls PushStep at the start to provide an operation-less + // PathStep used to report the root values. // - // Perhaps add PushStep and PopStep and change Report to only accept - // a PathStep instead of the full-path? Adding a PushStep and PopStep makes - // it clear that we are traversing the value tree in a depth-first-search - // manner, which has an effect on how values are printed. + // Within a slice, the exact set of inserted, removed, or modified elements + // is unspecified and may change in future implementations. + // The entries of a map are iterated through in an unspecified order. + PushStep(PathStep) + + // Report is called exactly once on leaf nodes to report whether the + // comparison identified the node as equal, unequal, or ignored. + // A leaf node is one that is immediately preceded by and followed by + // a pair of PushStep and PopStep calls. + Report(Result) + + // PopStep ascends back up the value tree. + // There is always a matching pop call for every push call. + PopStep() +}) Option { + return reporter{r} +} - Option +type reporter struct{ reporterIface } +type reporterIface interface { + PushStep(PathStep) + Report(Result) + PopStep() +} - // Report is called for every comparison made and will be provided with - // the two values being compared, the equality result, and the - // current path in the value tree. It is possible for x or y to be an - // invalid reflect.Value if one of the values is non-existent; - // which is possible with maps and slices. 
- Report(x, y reflect.Value, eq bool, p Path) +func (reporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { + panic("not implemented") } // normalizeOption normalizes the input options such that all Options groups @@ -424,30 +522,3 @@ func flattenOptions(dst, src Options) Options { } return dst } - -// getFuncName returns a short function name from the pointer. -// The string parsing logic works up until Go1.9. -func getFuncName(p uintptr) string { - fnc := runtime.FuncForPC(p) - if fnc == nil { - return "" - } - name := fnc.Name() // E.g., "long/path/name/mypkg.(mytype).(long/path/name/mypkg.myfunc)-fm" - if strings.HasSuffix(name, ")-fm") || strings.HasSuffix(name, ")·fm") { - // Strip the package name from method name. - name = strings.TrimSuffix(name, ")-fm") - name = strings.TrimSuffix(name, ")·fm") - if i := strings.LastIndexByte(name, '('); i >= 0 { - methodName := name[i+1:] // E.g., "long/path/name/mypkg.myfunc" - if j := strings.LastIndexByte(methodName, '.'); j >= 0 { - methodName = methodName[j+1:] // E.g., "myfunc" - } - name = name[:i] + methodName // E.g., "long/path/name/mypkg.(mytype)." + "myfunc" - } - } - if i := strings.LastIndexByte(name, '/'); i >= 0 { - // Strip the package name. - name = name[i+1:] // E.g., "mypkg.(mytype).myfunc" - } - return name -} diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go index c08a3cf80..96fffd291 100644 --- a/vendor/github.com/google/go-cmp/cmp/path.go +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -12,80 +12,52 @@ import ( "unicode/utf8" ) -type ( - // Path is a list of PathSteps describing the sequence of operations to get - // from some root type to the current position in the value tree. - // The first Path element is always an operation-less PathStep that exists - // simply to identify the initial type. 
- // - // When traversing structs with embedded structs, the embedded struct will - // always be accessed as a field before traversing the fields of the - // embedded struct themselves. That is, an exported field from the - // embedded struct will never be accessed directly from the parent struct. - Path []PathStep - - // PathStep is a union-type for specific operations to traverse - // a value's tree structure. Users of this package never need to implement - // these types as values of this type will be returned by this package. - PathStep interface { - String() string - Type() reflect.Type // Resulting type after performing the path step - isPathStep() - } +// Path is a list of PathSteps describing the sequence of operations to get +// from some root type to the current position in the value tree. +// The first Path element is always an operation-less PathStep that exists +// simply to identify the initial type. +// +// When traversing structs with embedded structs, the embedded struct will +// always be accessed as a field before traversing the fields of the +// embedded struct themselves. That is, an exported field from the +// embedded struct will never be accessed directly from the parent struct. +type Path []PathStep - // SliceIndex is an index operation on a slice or array at some index Key. - SliceIndex interface { - PathStep - Key() int // May return -1 if in a split state - - // SplitKeys returns the indexes for indexing into slices in the - // x and y values, respectively. These indexes may differ due to the - // insertion or removal of an element in one of the slices, causing - // all of the indexes to be shifted. If an index is -1, then that - // indicates that the element does not exist in the associated slice. - // - // Key is guaranteed to return -1 if and only if the indexes returned - // by SplitKeys are not the same. SplitKeys will never return -1 for - // both indexes. 
- SplitKeys() (x int, y int) - - isSliceIndex() - } - // MapIndex is an index operation on a map at some index Key. - MapIndex interface { - PathStep - Key() reflect.Value - isMapIndex() - } - // TypeAssertion represents a type assertion on an interface. - TypeAssertion interface { - PathStep - isTypeAssertion() - } - // StructField represents a struct field access on a field called Name. - StructField interface { - PathStep - Name() string - Index() int - isStructField() - } - // Indirect represents pointer indirection on the parent type. - Indirect interface { - PathStep - isIndirect() - } - // Transform is a transformation from the parent type to the current type. - Transform interface { - PathStep - Name() string - Func() reflect.Value +// PathStep is a union-type for specific operations to traverse +// a value's tree structure. Users of this package never need to implement +// these types as values of this type will be returned by this package. +// +// Implementations of this interface are +// StructField, SliceIndex, MapIndex, Indirect, TypeAssertion, and Transform. +type PathStep interface { + String() string - // Option returns the originally constructed Transformer option. - // The == operator can be used to detect the exact option used. - Option() Option + // Type is the resulting type after performing the path step. + Type() reflect.Type - isTransform() - } + // Values is the resulting values after performing the path step. + // The type of each valid value is guaranteed to be identical to Type. + // + // In some cases, one or both may be invalid or have restrictions: + // • For StructField, both are not interface-able if the current field + // is unexported and the struct type is not explicitly permitted by + // AllowUnexported to traverse unexported fields. + // • For SliceIndex, one may be invalid if an element is missing from + // either the x or y slice. + // • For MapIndex, one may be invalid if an entry is missing from + // either the x or y map. 
+ // + // The provided values must not be mutated. + Values() (vx, vy reflect.Value) +} + +var ( + _ PathStep = StructField{} + _ PathStep = SliceIndex{} + _ PathStep = MapIndex{} + _ PathStep = Indirect{} + _ PathStep = TypeAssertion{} + _ PathStep = Transform{} ) func (pa *Path) push(s PathStep) { @@ -124,7 +96,7 @@ func (pa Path) Index(i int) PathStep { func (pa Path) String() string { var ss []string for _, s := range pa { - if _, ok := s.(*structField); ok { + if _, ok := s.(StructField); ok { ss = append(ss, s.String()) } } @@ -144,13 +116,13 @@ func (pa Path) GoString() string { nextStep = pa[i+1] } switch s := s.(type) { - case *indirect: + case Indirect: numIndirect++ pPre, pPost := "(", ")" switch nextStep.(type) { - case *indirect: + case Indirect: continue // Next step is indirection, so let them batch up - case *structField: + case StructField: numIndirect-- // Automatic indirection on struct fields case nil: pPre, pPost = "", "" // Last step; no need for parenthesis @@ -161,19 +133,10 @@ func (pa Path) GoString() string { } numIndirect = 0 continue - case *transform: + case Transform: ssPre = append(ssPre, s.trans.name+"(") ssPost = append(ssPost, ")") continue - case *typeAssertion: - // As a special-case, elide type assertions on anonymous types - // since they are typically generated dynamically and can be very - // verbose. For example, some transforms return interface{} because - // of Go's lack of generics, but typically take in and return the - // exact same concrete type. 
- if s.Type().PkgPath() == "" { - continue - } } ssPost = append(ssPost, s.String()) } @@ -183,44 +146,13 @@ func (pa Path) GoString() string { return strings.Join(ssPre, "") + strings.Join(ssPost, "") } -type ( - pathStep struct { - typ reflect.Type - } - - sliceIndex struct { - pathStep - xkey, ykey int - } - mapIndex struct { - pathStep - key reflect.Value - } - typeAssertion struct { - pathStep - } - structField struct { - pathStep - name string - idx int - - // These fields are used for forcibly accessing an unexported field. - // pvx, pvy, and field are only valid if unexported is true. - unexported bool - force bool // Forcibly allow visibility - pvx, pvy reflect.Value // Parent values - field reflect.StructField // Field information - } - indirect struct { - pathStep - } - transform struct { - pathStep - trans *transformer - } -) +type pathStep struct { + typ reflect.Type + vx, vy reflect.Value +} -func (ps pathStep) Type() reflect.Type { return ps.typ } +func (ps pathStep) Type() reflect.Type { return ps.typ } +func (ps pathStep) Values() (vx, vy reflect.Value) { return ps.vx, ps.vy } func (ps pathStep) String() string { if ps.typ == nil { return "" @@ -232,7 +164,54 @@ func (ps pathStep) String() string { return fmt.Sprintf("{%s}", s) } -func (si sliceIndex) String() string { +// StructField represents a struct field access on a field called Name. +type StructField struct{ *structField } +type structField struct { + pathStep + name string + idx int + + // These fields are used for forcibly accessing an unexported field. + // pvx, pvy, and field are only valid if unexported is true. 
+ unexported bool + mayForce bool // Forcibly allow visibility + pvx, pvy reflect.Value // Parent values + field reflect.StructField // Field information +} + +func (sf StructField) Type() reflect.Type { return sf.typ } +func (sf StructField) Values() (vx, vy reflect.Value) { + if !sf.unexported { + return sf.vx, sf.vy // CanInterface reports true + } + + // Forcibly obtain read-write access to an unexported struct field. + if sf.mayForce { + vx = retrieveUnexportedField(sf.pvx, sf.field) + vy = retrieveUnexportedField(sf.pvy, sf.field) + return vx, vy // CanInterface reports true + } + return sf.vx, sf.vy // CanInterface reports false +} +func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) } + +// Name is the field name. +func (sf StructField) Name() string { return sf.name } + +// Index is the index of the field in the parent struct type. +// See reflect.Type.Field. +func (sf StructField) Index() int { return sf.idx } + +// SliceIndex is an index operation on a slice or array at some index Key. 
+type SliceIndex struct{ *sliceIndex } +type sliceIndex struct { + pathStep + xkey, ykey int +} + +func (si SliceIndex) Type() reflect.Type { return si.typ } +func (si SliceIndex) Values() (vx, vy reflect.Value) { return si.vx, si.vy } +func (si SliceIndex) String() string { switch { case si.xkey == si.ykey: return fmt.Sprintf("[%d]", si.xkey) @@ -247,63 +226,83 @@ func (si sliceIndex) String() string { return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey) } } -func (mi mapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) } -func (ta typeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) } -func (sf structField) String() string { return fmt.Sprintf(".%s", sf.name) } -func (in indirect) String() string { return "*" } -func (tf transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) } -func (si sliceIndex) Key() int { +// Key is the index key; it may return -1 if in a split state +func (si SliceIndex) Key() int { if si.xkey != si.ykey { return -1 } return si.xkey } -func (si sliceIndex) SplitKeys() (x, y int) { return si.xkey, si.ykey } -func (mi mapIndex) Key() reflect.Value { return mi.key } -func (sf structField) Name() string { return sf.name } -func (sf structField) Index() int { return sf.idx } -func (tf transform) Name() string { return tf.trans.name } -func (tf transform) Func() reflect.Value { return tf.trans.fnc } -func (tf transform) Option() Option { return tf.trans } - -func (pathStep) isPathStep() {} -func (sliceIndex) isSliceIndex() {} -func (mapIndex) isMapIndex() {} -func (typeAssertion) isTypeAssertion() {} -func (structField) isStructField() {} -func (indirect) isIndirect() {} -func (transform) isTransform() {} -var ( - _ SliceIndex = sliceIndex{} - _ MapIndex = mapIndex{} - _ TypeAssertion = typeAssertion{} - _ StructField = structField{} - _ Indirect = indirect{} - _ Transform = transform{} - - _ PathStep = sliceIndex{} - _ PathStep = mapIndex{} - _ PathStep = typeAssertion{} - _ PathStep = structField{} - _ 
PathStep = indirect{} - _ PathStep = transform{} -) +// SplitKeys are the indexes for indexing into slices in the +// x and y values, respectively. These indexes may differ due to the +// insertion or removal of an element in one of the slices, causing +// all of the indexes to be shifted. If an index is -1, then that +// indicates that the element does not exist in the associated slice. +// +// Key is guaranteed to return -1 if and only if the indexes returned +// by SplitKeys are not the same. SplitKeys will never return -1 for +// both indexes. +func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey } + +// MapIndex is an index operation on a map at some index Key. +type MapIndex struct{ *mapIndex } +type mapIndex struct { + pathStep + key reflect.Value +} + +func (mi MapIndex) Type() reflect.Type { return mi.typ } +func (mi MapIndex) Values() (vx, vy reflect.Value) { return mi.vx, mi.vy } +func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) } + +// Key is the value of the map key. +func (mi MapIndex) Key() reflect.Value { return mi.key } + +// Indirect represents pointer indirection on the parent type. +type Indirect struct{ *indirect } +type indirect struct { + pathStep +} + +func (in Indirect) Type() reflect.Type { return in.typ } +func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy } +func (in Indirect) String() string { return "*" } + +// TypeAssertion represents a type assertion on an interface. +type TypeAssertion struct{ *typeAssertion } +type typeAssertion struct { + pathStep +} + +func (ta TypeAssertion) Type() reflect.Type { return ta.typ } +func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy } +func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) } + +// Transform is a transformation from the parent type to the current type. 
+type Transform struct{ *transform } +type transform struct { + pathStep + trans *transformer +} + +func (tf Transform) Type() reflect.Type { return tf.typ } +func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy } +func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) } + +// Name is the name of the Transformer. +func (tf Transform) Name() string { return tf.trans.name } + +// Func is the function pointer to the transformer function. +func (tf Transform) Func() reflect.Value { return tf.trans.fnc } + +// Option returns the originally constructed Transformer option. +// The == operator can be used to detect the exact option used. +func (tf Transform) Option() Option { return tf.trans } // isExported reports whether the identifier is exported. func isExported(id string) bool { r, _ := utf8.DecodeRuneInString(id) return unicode.IsUpper(r) } - -// isValid reports whether the identifier is valid. -// Empty and underscore-only strings are not valid. -func isValid(id string) bool { - ok := id != "" && id != "_" - for j, c := range id { - ok = ok && (j > 0 || !unicode.IsDigit(c)) - ok = ok && (c == '_' || unicode.IsLetter(c) || unicode.IsDigit(c)) - } - return ok -} diff --git a/vendor/github.com/google/go-cmp/cmp/report.go b/vendor/github.com/google/go-cmp/cmp/report.go new file mode 100644 index 000000000..6ddf29993 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report.go @@ -0,0 +1,51 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +// defaultReporter implements the reporter interface. +// +// As Equal serially calls the PushStep, Report, and PopStep methods, the +// defaultReporter constructs a tree-based representation of the compared value +// and the result of each comparison (see valueNode). 
+// +// When the String method is called, the FormatDiff method transforms the +// valueNode tree into a textNode tree, which is a tree-based representation +// of the textual output (see textNode). +// +// Lastly, the textNode.String method produces the final report as a string. +type defaultReporter struct { + root *valueNode + curr *valueNode +} + +func (r *defaultReporter) PushStep(ps PathStep) { + r.curr = r.curr.PushStep(ps) + if r.root == nil { + r.root = r.curr + } +} +func (r *defaultReporter) Report(rs Result) { + r.curr.Report(rs) +} +func (r *defaultReporter) PopStep() { + r.curr = r.curr.PopStep() +} + +// String provides a full report of the differences detected as a structured +// literal in pseudo-Go syntax. String may only be called after the entire tree +// has been traversed. +func (r *defaultReporter) String() string { + assert(r.root != nil && r.curr == nil) + if r.root.NumDiff == 0 { + return "" + } + return formatOptions{}.FormatDiff(r.root).String() +} + +func assert(ok bool) { + if !ok { + panic("assertion failure") + } +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go new file mode 100644 index 000000000..17a05eede --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go @@ -0,0 +1,296 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + + "github.com/google/go-cmp/cmp/internal/value" +) + +// TODO: Enforce limits? +// * Enforce maximum number of records to print per node? +// * Enforce maximum size in bytes allowed? +// * As a heuristic, use less verbosity for equal nodes than unequal nodes. +// TODO: Enforce unique outputs? +// * Avoid Stringer methods if it results in same output? +// * Print pointer address if outputs still equal? 
+ +// numContextRecords is the number of surrounding equal records to print. +const numContextRecords = 2 + +type diffMode byte + +const ( + diffUnknown diffMode = 0 + diffIdentical diffMode = ' ' + diffRemoved diffMode = '-' + diffInserted diffMode = '+' +) + +type typeMode int + +const ( + // emitType always prints the type. + emitType typeMode = iota + // elideType never prints the type. + elideType + // autoType prints the type only for composite kinds + // (i.e., structs, slices, arrays, and maps). + autoType +) + +type formatOptions struct { + // DiffMode controls the output mode of FormatDiff. + // + // If diffUnknown, then produce a diff of the x and y values. + // If diffIdentical, then emit values as if they were equal. + // If diffRemoved, then only emit x values (ignoring y values). + // If diffInserted, then only emit y values (ignoring x values). + DiffMode diffMode + + // TypeMode controls whether to print the type for the current node. + // + // As a general rule of thumb, we always print the type of the next node + // after an interface, and always elide the type of the next node after + // a slice or map node. + TypeMode typeMode + + // formatValueOptions are options specific to printing reflect.Values. + formatValueOptions +} + +func (opts formatOptions) WithDiffMode(d diffMode) formatOptions { + opts.DiffMode = d + return opts +} +func (opts formatOptions) WithTypeMode(t typeMode) formatOptions { + opts.TypeMode = t + return opts +} + +// FormatDiff converts a valueNode tree into a textNode tree, where the later +// is a textual representation of the differences detected in the former. +func (opts formatOptions) FormatDiff(v *valueNode) textNode { + // Check whether we have specialized formatting for this node. + // This is not necessary, but helpful for producing more readable outputs. + if opts.CanFormatDiffSlice(v) { + return opts.FormatDiffSlice(v) + } + + // For leaf nodes, format the value based on the reflect.Values alone. 
+ if v.MaxDepth == 0 { + switch opts.DiffMode { + case diffUnknown, diffIdentical: + // Format Equal. + if v.NumDiff == 0 { + outx := opts.FormatValue(v.ValueX, visitedPointers{}) + outy := opts.FormatValue(v.ValueY, visitedPointers{}) + if v.NumIgnored > 0 && v.NumSame == 0 { + return textEllipsis + } else if outx.Len() < outy.Len() { + return outx + } else { + return outy + } + } + + // Format unequal. + assert(opts.DiffMode == diffUnknown) + var list textList + outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, visitedPointers{}) + outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, visitedPointers{}) + if outx != nil { + list = append(list, textRecord{Diff: '-', Value: outx}) + } + if outy != nil { + list = append(list, textRecord{Diff: '+', Value: outy}) + } + return opts.WithTypeMode(emitType).FormatType(v.Type, list) + case diffRemoved: + return opts.FormatValue(v.ValueX, visitedPointers{}) + case diffInserted: + return opts.FormatValue(v.ValueY, visitedPointers{}) + default: + panic("invalid diff mode") + } + } + + // Descend into the child value node. + if v.TransformerName != "" { + out := opts.WithTypeMode(emitType).FormatDiff(v.Value) + out = textWrap{"Inverse(" + v.TransformerName + ", ", out, ")"} + return opts.FormatType(v.Type, out) + } else { + switch k := v.Type.Kind(); k { + case reflect.Struct, reflect.Array, reflect.Slice, reflect.Map: + return opts.FormatType(v.Type, opts.formatDiffList(v.Records, k)) + case reflect.Ptr: + return textWrap{"&", opts.FormatDiff(v.Value), ""} + case reflect.Interface: + return opts.WithTypeMode(emitType).FormatDiff(v.Value) + default: + panic(fmt.Sprintf("%v cannot have children", k)) + } + } +} + +func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) textNode { + // Derive record name based on the data structure kind. 
+ var name string + var formatKey func(reflect.Value) string + switch k { + case reflect.Struct: + name = "field" + opts = opts.WithTypeMode(autoType) + formatKey = func(v reflect.Value) string { return v.String() } + case reflect.Slice, reflect.Array: + name = "element" + opts = opts.WithTypeMode(elideType) + formatKey = func(reflect.Value) string { return "" } + case reflect.Map: + name = "entry" + opts = opts.WithTypeMode(elideType) + formatKey = formatMapKey + } + + // Handle unification. + switch opts.DiffMode { + case diffIdentical, diffRemoved, diffInserted: + var list textList + var deferredEllipsis bool // Add final "..." to indicate records were dropped + for _, r := range recs { + // Elide struct fields that are zero value. + if k == reflect.Struct { + var isZero bool + switch opts.DiffMode { + case diffIdentical: + isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueY) + case diffRemoved: + isZero = value.IsZero(r.Value.ValueX) + case diffInserted: + isZero = value.IsZero(r.Value.ValueY) + } + if isZero { + continue + } + } + // Elide ignored nodes. + if r.Value.NumIgnored > 0 && r.Value.NumSame+r.Value.NumDiff == 0 { + deferredEllipsis = !(k == reflect.Slice || k == reflect.Array) + if !deferredEllipsis { + list.AppendEllipsis(diffStats{}) + } + continue + } + if out := opts.FormatDiff(r.Value); out != nil { + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + } + if deferredEllipsis { + list.AppendEllipsis(diffStats{}) + } + return textWrap{"{", list, "}"} + case diffUnknown: + default: + panic("invalid diff mode") + } + + // Handle differencing. + var list textList + groups := coalesceAdjacentRecords(name, recs) + for i, ds := range groups { + // Handle equal records. + if ds.NumDiff() == 0 { + // Compute the number of leading and trailing records to print. 
+ var numLo, numHi int + numEqual := ds.NumIgnored + ds.NumIdentical + for numLo < numContextRecords && numLo+numHi < numEqual && i != 0 { + if r := recs[numLo].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 { + break + } + numLo++ + } + for numHi < numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 { + if r := recs[numEqual-numHi-1].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 { + break + } + numHi++ + } + if numEqual-(numLo+numHi) == 1 && ds.NumIgnored == 0 { + numHi++ // Avoid pointless coalescing of a single equal record + } + + // Format the equal values. + for _, r := range recs[:numLo] { + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + if numEqual > numLo+numHi { + ds.NumIdentical -= numLo + numHi + list.AppendEllipsis(ds) + } + for _, r := range recs[numEqual-numHi : numEqual] { + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + recs = recs[numEqual:] + continue + } + + // Handle unequal records. 
+ for _, r := range recs[:ds.NumDiff()] { + switch { + case opts.CanFormatDiffSlice(r.Value): + out := opts.FormatDiffSlice(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + case r.Value.NumChildren == r.Value.MaxDepth: + outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value) + outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value) + if outx != nil { + list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx}) + } + if outy != nil { + list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy}) + } + default: + out := opts.FormatDiff(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + } + recs = recs[ds.NumDiff():] + } + assert(len(recs) == 0) + return textWrap{"{", list, "}"} +} + +// coalesceAdjacentRecords coalesces the list of records into groups of +// adjacent equal, or unequal counts. +func coalesceAdjacentRecords(name string, recs []reportRecord) (groups []diffStats) { + var prevCase int // Arbitrary index into which case last occurred + lastStats := func(i int) *diffStats { + if prevCase != i { + groups = append(groups, diffStats{Name: name}) + prevCase = i + } + return &groups[len(groups)-1] + } + for _, r := range recs { + switch rv := r.Value; { + case rv.NumIgnored > 0 && rv.NumSame+rv.NumDiff == 0: + lastStats(1).NumIgnored++ + case rv.NumDiff == 0: + lastStats(1).NumIdentical++ + case rv.NumDiff > 0 && !rv.ValueY.IsValid(): + lastStats(2).NumRemoved++ + case rv.NumDiff > 0 && !rv.ValueX.IsValid(): + lastStats(2).NumInserted++ + default: + lastStats(2).NumModified++ + } + } + return groups +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go new file mode 100644 index 000000000..2761b6289 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -0,0 +1,278 @@ +// Copyright 2019, The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "unicode" + + "github.com/google/go-cmp/cmp/internal/flags" + "github.com/google/go-cmp/cmp/internal/value" +) + +type formatValueOptions struct { + // AvoidStringer controls whether to avoid calling custom stringer + // methods like error.Error or fmt.Stringer.String. + AvoidStringer bool + + // ShallowPointers controls whether to avoid descending into pointers. + // Useful when printing map keys, where pointer comparison is performed + // on the pointer address rather than the pointed-at value. + ShallowPointers bool + + // PrintAddresses controls whether to print the address of all pointers, + // slice elements, and maps. + PrintAddresses bool +} + +// FormatType prints the type as if it were wrapping s. +// This may return s as-is depending on the current type and TypeMode mode. +func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode { + // Check whether to emit the type or not. + switch opts.TypeMode { + case autoType: + switch t.Kind() { + case reflect.Struct, reflect.Slice, reflect.Array, reflect.Map: + if s.Equal(textNil) { + return s + } + default: + return s + } + case elideType: + return s + } + + // Determine the type label, applying special handling for unnamed types. + typeName := t.String() + if t.Name() == "" { + // According to Go grammar, certain type literals contain symbols that + // do not strongly bind to the next lexicographical token (e.g., *T). + switch t.Kind() { + case reflect.Chan, reflect.Func, reflect.Ptr: + typeName = "(" + typeName + ")" + } + typeName = strings.Replace(typeName, "struct {", "struct{", -1) + typeName = strings.Replace(typeName, "interface {", "interface{", -1) + } + + // Avoid wrap the value in parenthesis if unnecessary. 
+ if s, ok := s.(textWrap); ok { + hasParens := strings.HasPrefix(s.Prefix, "(") && strings.HasSuffix(s.Suffix, ")") + hasBraces := strings.HasPrefix(s.Prefix, "{") && strings.HasSuffix(s.Suffix, "}") + if hasParens || hasBraces { + return textWrap{typeName, s, ""} + } + } + return textWrap{typeName + "(", s, ")"} +} + +// FormatValue prints the reflect.Value, taking extra care to avoid descending +// into pointers already in m. As pointers are visited, m is also updated. +func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out textNode) { + if !v.IsValid() { + return nil + } + t := v.Type() + + // Check whether there is an Error or String method to call. + if !opts.AvoidStringer && v.CanInterface() { + // Avoid calling Error or String methods on nil receivers since many + // implementations crash when doing so. + if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() { + switch v := v.Interface().(type) { + case error: + return textLine("e" + formatString(v.Error())) + case fmt.Stringer: + return textLine("s" + formatString(v.String())) + } + } + } + + // Check whether to explicitly wrap the result with the type. + var skipType bool + defer func() { + if !skipType { + out = opts.FormatType(t, out) + } + }() + + var ptr string + switch t.Kind() { + case reflect.Bool: + return textLine(fmt.Sprint(v.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return textLine(fmt.Sprint(v.Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + // Unnamed uints are usually bytes or words, so use hexadecimal. 
+ if t.PkgPath() == "" || t.Kind() == reflect.Uintptr { + return textLine(formatHex(v.Uint())) + } + return textLine(fmt.Sprint(v.Uint())) + case reflect.Float32, reflect.Float64: + return textLine(fmt.Sprint(v.Float())) + case reflect.Complex64, reflect.Complex128: + return textLine(fmt.Sprint(v.Complex())) + case reflect.String: + return textLine(formatString(v.String())) + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + return textLine(formatPointer(v)) + case reflect.Struct: + var list textList + for i := 0; i < v.NumField(); i++ { + vv := v.Field(i) + if value.IsZero(vv) { + continue // Elide fields with zero values + } + s := opts.WithTypeMode(autoType).FormatValue(vv, m) + list = append(list, textRecord{Key: t.Field(i).Name, Value: s}) + } + return textWrap{"{", list, "}"} + case reflect.Slice: + if v.IsNil() { + return textNil + } + if opts.PrintAddresses { + ptr = formatPointer(v) + } + fallthrough + case reflect.Array: + var list textList + for i := 0; i < v.Len(); i++ { + vi := v.Index(i) + if vi.CanAddr() { // Check for cyclic elements + p := vi.Addr() + if m.Visit(p) { + var out textNode + out = textLine(formatPointer(p)) + out = opts.WithTypeMode(emitType).FormatType(p.Type(), out) + out = textWrap{"*", out, ""} + list = append(list, textRecord{Value: out}) + continue + } + } + s := opts.WithTypeMode(elideType).FormatValue(vi, m) + list = append(list, textRecord{Value: s}) + } + return textWrap{ptr + "{", list, "}"} + case reflect.Map: + if v.IsNil() { + return textNil + } + if m.Visit(v) { + return textLine(formatPointer(v)) + } + + var list textList + for _, k := range value.SortKeys(v.MapKeys()) { + sk := formatMapKey(k) + sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), m) + list = append(list, textRecord{Key: sk, Value: sv}) + } + if opts.PrintAddresses { + ptr = formatPointer(v) + } + return textWrap{ptr + "{", list, "}"} + case reflect.Ptr: + if v.IsNil() { + return textNil + } + if m.Visit(v) || opts.ShallowPointers { + 
return textLine(formatPointer(v)) + } + if opts.PrintAddresses { + ptr = formatPointer(v) + } + skipType = true // Let the underlying value print the type instead + return textWrap{"&" + ptr, opts.FormatValue(v.Elem(), m), ""} + case reflect.Interface: + if v.IsNil() { + return textNil + } + // Interfaces accept different concrete types, + // so configure the underlying value to explicitly print the type. + skipType = true // Print the concrete type instead + return opts.WithTypeMode(emitType).FormatValue(v.Elem(), m) + default: + panic(fmt.Sprintf("%v kind not handled", v.Kind())) + } +} + +// formatMapKey formats v as if it were a map key. +// The result is guaranteed to be a single line. +func formatMapKey(v reflect.Value) string { + var opts formatOptions + opts.TypeMode = elideType + opts.ShallowPointers = true + s := opts.FormatValue(v, visitedPointers{}).String() + return strings.TrimSpace(s) +} + +// formatString prints s as a double-quoted or backtick-quoted string. +func formatString(s string) string { + // Use quoted string if it the same length as a raw string literal. + // Otherwise, attempt to use the raw string form. + qs := strconv.Quote(s) + if len(qs) == 1+len(s)+1 { + return qs + } + + // Disallow newlines to ensure output is a single line. + // Only allow printable runes for readability purposes. + rawInvalid := func(r rune) bool { + return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t') + } + if strings.IndexFunc(s, rawInvalid) < 0 { + return "`" + s + "`" + } + return qs +} + +// formatHex prints u as a hexadecimal integer in Go notation. 
+func formatHex(u uint64) string { + var f string + switch { + case u <= 0xff: + f = "0x%02x" + case u <= 0xffff: + f = "0x%04x" + case u <= 0xffffff: + f = "0x%06x" + case u <= 0xffffffff: + f = "0x%08x" + case u <= 0xffffffffff: + f = "0x%010x" + case u <= 0xffffffffffff: + f = "0x%012x" + case u <= 0xffffffffffffff: + f = "0x%014x" + case u <= 0xffffffffffffffff: + f = "0x%016x" + } + return fmt.Sprintf(f, u) +} + +// formatPointer prints the address of the pointer. +func formatPointer(v reflect.Value) string { + p := v.Pointer() + if flags.Deterministic { + p = 0xdeadf00f // Only used for stable testing purposes + } + return fmt.Sprintf("⟪0x%x⟫", p) +} + +type visitedPointers map[value.Pointer]struct{} + +// Visit inserts pointer v into the visited map and reports whether it had +// already been visited before. +func (m visitedPointers) Visit(v reflect.Value) bool { + p := value.PointerOf(v) + _, visited := m[p] + m[p] = struct{}{} + return visited +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go new file mode 100644 index 000000000..eafcf2e4c --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -0,0 +1,333 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "bytes" + "fmt" + "reflect" + "strings" + "unicode" + "unicode/utf8" + + "github.com/google/go-cmp/cmp/internal/diff" +) + +// CanFormatDiffSlice reports whether we support custom formatting for nodes +// that are slices of primitive kinds or strings. 
+func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { + switch { + case opts.DiffMode != diffUnknown: + return false // Must be formatting in diff mode + case v.NumDiff == 0: + return false // No differences detected + case v.NumIgnored+v.NumCompared+v.NumTransformed > 0: + // TODO: Handle the case where someone uses bytes.Equal on a large slice. + return false // Some custom option was used to determined equality + case !v.ValueX.IsValid() || !v.ValueY.IsValid(): + return false // Both values must be valid + } + + switch t := v.Type; t.Kind() { + case reflect.String: + case reflect.Array, reflect.Slice: + // Only slices of primitive types have specialized handling. + switch t.Elem().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + default: + return false + } + + // If a sufficient number of elements already differ, + // use specialized formatting even if length requirement is not met. + if v.NumDiff > v.NumSame { + return true + } + default: + return false + } + + // Use specialized string diffing for longer slices or strings. + const minLength = 64 + return v.ValueX.Len() >= minLength && v.ValueY.Len() >= minLength +} + +// FormatDiffSlice prints a diff for the slices (or strings) represented by v. +// This provides custom-tailored logic to make printing of differences in +// textual strings and slices of primitive kinds more readable. +func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { + assert(opts.DiffMode == diffUnknown) + t, vx, vy := v.Type, v.ValueX, v.ValueY + + // Auto-detect the type of the data. 
+ var isLinedText, isText, isBinary bool + var sx, sy string + switch { + case t.Kind() == reflect.String: + sx, sy = vx.String(), vy.String() + isText = true // Initial estimate, verify later + case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)): + sx, sy = string(vx.Bytes()), string(vy.Bytes()) + isBinary = true // Initial estimate, verify later + case t.Kind() == reflect.Array: + // Arrays need to be addressable for slice operations to work. + vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem() + vx2.Set(vx) + vy2.Set(vy) + vx, vy = vx2, vy2 + } + if isText || isBinary { + var numLines, lastLineIdx, maxLineLen int + isBinary = false + for i, r := range sx + sy { + if !(unicode.IsPrint(r) || unicode.IsSpace(r)) || r == utf8.RuneError { + isBinary = true + break + } + if r == '\n' { + if maxLineLen < i-lastLineIdx { + maxLineLen = i - lastLineIdx + } + lastLineIdx = i + 1 + numLines++ + } + } + isText = !isBinary + isLinedText = isText && numLines >= 4 && maxLineLen <= 256 + } + + // Format the string into printable records. + var list textList + var delim string + switch { + // If the text appears to be multi-lined text, + // then perform differencing across individual lines. + case isLinedText: + ssx := strings.Split(sx, "\n") + ssy := strings.Split(sy, "\n") + list = opts.formatDiffSlice( + reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line", + func(v reflect.Value, d diffMode) textRecord { + s := formatString(v.Index(0).String()) + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + delim = "\n" + // If the text appears to be single-lined text, + // then perform differencing in approximately fixed-sized chunks. + // The output is printed as quoted strings. 
+ case isText: + list = opts.formatDiffSlice( + reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte", + func(v reflect.Value, d diffMode) textRecord { + s := formatString(v.String()) + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + delim = "" + // If the text appears to be binary data, + // then perform differencing in approximately fixed-sized chunks. + // The output is inspired by hexdump. + case isBinary: + list = opts.formatDiffSlice( + reflect.ValueOf(sx), reflect.ValueOf(sy), 16, "byte", + func(v reflect.Value, d diffMode) textRecord { + var ss []string + for i := 0; i < v.Len(); i++ { + ss = append(ss, formatHex(v.Index(i).Uint())) + } + s := strings.Join(ss, ", ") + comment := commentString(fmt.Sprintf("%c|%v|", d, formatASCII(v.String()))) + return textRecord{Diff: d, Value: textLine(s), Comment: comment} + }, + ) + // For all other slices of primitive types, + // then perform differencing in approximately fixed-sized chunks. + // The size of each chunk depends on the width of the element kind. 
+ default: + var chunkSize int + if t.Elem().Kind() == reflect.Bool { + chunkSize = 16 + } else { + switch t.Elem().Bits() { + case 8: + chunkSize = 16 + case 16: + chunkSize = 12 + case 32: + chunkSize = 8 + default: + chunkSize = 8 + } + } + list = opts.formatDiffSlice( + vx, vy, chunkSize, t.Elem().Kind().String(), + func(v reflect.Value, d diffMode) textRecord { + var ss []string + for i := 0; i < v.Len(); i++ { + switch t.Elem().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + ss = append(ss, fmt.Sprint(v.Index(i).Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + ss = append(ss, formatHex(v.Index(i).Uint())) + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + ss = append(ss, fmt.Sprint(v.Index(i).Interface())) + } + } + s := strings.Join(ss, ", ") + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + } + + // Wrap the output with appropriate type information. + var out textNode = textWrap{"{", list, "}"} + if !isText { + // The "{...}" byte-sequence literal is not valid Go syntax for strings. + // Emit the type for extra clarity (e.g. "string{...}"). + if t.Kind() == reflect.String { + opts = opts.WithTypeMode(emitType) + } + return opts.FormatType(t, out) + } + switch t.Kind() { + case reflect.String: + out = textWrap{"strings.Join(", out, fmt.Sprintf(", %q)", delim)} + if t != reflect.TypeOf(string("")) { + out = opts.FormatType(t, out) + } + case reflect.Slice: + out = textWrap{"bytes.Join(", out, fmt.Sprintf(", %q)", delim)} + if t != reflect.TypeOf([]byte(nil)) { + out = opts.FormatType(t, out) + } + } + return out +} + +// formatASCII formats s as an ASCII string. +// This is useful for printing binary strings in a semi-legible way. 
+func formatASCII(s string) string { + b := bytes.Repeat([]byte{'.'}, len(s)) + for i := 0; i < len(s); i++ { + if ' ' <= s[i] && s[i] <= '~' { + b[i] = s[i] + } + } + return string(b) +} + +func (opts formatOptions) formatDiffSlice( + vx, vy reflect.Value, chunkSize int, name string, + makeRec func(reflect.Value, diffMode) textRecord, +) (list textList) { + es := diff.Difference(vx.Len(), vy.Len(), func(ix int, iy int) diff.Result { + return diff.BoolResult(vx.Index(ix).Interface() == vy.Index(iy).Interface()) + }) + + appendChunks := func(v reflect.Value, d diffMode) int { + n0 := v.Len() + for v.Len() > 0 { + n := chunkSize + if n > v.Len() { + n = v.Len() + } + list = append(list, makeRec(v.Slice(0, n), d)) + v = v.Slice(n, v.Len()) + } + return n0 - v.Len() + } + + groups := coalesceAdjacentEdits(name, es) + groups = coalesceInterveningIdentical(groups, chunkSize/4) + for i, ds := range groups { + // Print equal. + if ds.NumDiff() == 0 { + // Compute the number of leading and trailing equal bytes to print. + var numLo, numHi int + numEqual := ds.NumIgnored + ds.NumIdentical + for numLo < chunkSize*numContextRecords && numLo+numHi < numEqual && i != 0 { + numLo++ + } + for numHi < chunkSize*numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 { + numHi++ + } + if numEqual-(numLo+numHi) <= chunkSize && ds.NumIgnored == 0 { + numHi = numEqual - numLo // Avoid pointless coalescing of single equal row + } + + // Print the equal bytes. + appendChunks(vx.Slice(0, numLo), diffIdentical) + if numEqual > numLo+numHi { + ds.NumIdentical -= numLo + numHi + list.AppendEllipsis(ds) + } + appendChunks(vx.Slice(numEqual-numHi, numEqual), diffIdentical) + vx = vx.Slice(numEqual, vx.Len()) + vy = vy.Slice(numEqual, vy.Len()) + continue + } + + // Print unequal. 
+ nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved) + vx = vx.Slice(nx, vx.Len()) + ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted) + vy = vy.Slice(ny, vy.Len()) + } + assert(vx.Len() == 0 && vy.Len() == 0) + return list +} + +// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent +// equal or unequal counts. +func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) { + var prevCase int // Arbitrary index into which case last occurred + lastStats := func(i int) *diffStats { + if prevCase != i { + groups = append(groups, diffStats{Name: name}) + prevCase = i + } + return &groups[len(groups)-1] + } + for _, e := range es { + switch e { + case diff.Identity: + lastStats(1).NumIdentical++ + case diff.UniqueX: + lastStats(2).NumRemoved++ + case diff.UniqueY: + lastStats(2).NumInserted++ + case diff.Modified: + lastStats(2).NumModified++ + } + } + return groups +} + +// coalesceInterveningIdentical coalesces sufficiently short (<= windowSize) +// equal groups into adjacent unequal groups that currently result in a +// dual inserted/removed printout. This acts as a high-pass filter to smooth +// out high-frequency changes within the windowSize. 
+func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats { + groups, groupsOrig := groups[:0], groups + for i, ds := range groupsOrig { + if len(groups) >= 2 && ds.NumDiff() > 0 { + prev := &groups[len(groups)-2] // Unequal group + curr := &groups[len(groups)-1] // Equal group + next := &groupsOrig[i] // Unequal group + hadX, hadY := prev.NumRemoved > 0, prev.NumInserted > 0 + hasX, hasY := next.NumRemoved > 0, next.NumInserted > 0 + if ((hadX || hasX) && (hadY || hasY)) && curr.NumIdentical <= windowSize { + *prev = prev.Append(*curr).Append(*next) + groups = groups[:len(groups)-1] // Truncate off equal group + continue + } + } + groups = append(groups, ds) + } + return groups +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go new file mode 100644 index 000000000..8b8fcab7b --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_text.go @@ -0,0 +1,387 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "bytes" + "fmt" + "math/rand" + "strings" + "time" + + "github.com/google/go-cmp/cmp/internal/flags" +) + +var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 + +type indentMode int + +func (n indentMode) appendIndent(b []byte, d diffMode) []byte { + // The output of Diff is documented as being unstable to provide future + // flexibility in changing the output for more humanly readable reports. + // This logic intentionally introduces instability to the exact output + // so that users can detect accidental reliance on stability early on, + // rather than much later when an actual change to the format occurs. + if flags.Deterministic || randBool { + // Use regular spaces (U+0020). + switch d { + case diffUnknown, diffIdentical: + b = append(b, " "...) + case diffRemoved: + b = append(b, "- "...) 
+ case diffInserted: + b = append(b, "+ "...) + } + } else { + // Use non-breaking spaces (U+00a0). + switch d { + case diffUnknown, diffIdentical: + b = append(b, "  "...) + case diffRemoved: + b = append(b, "- "...) + case diffInserted: + b = append(b, "+ "...) + } + } + return repeatCount(n).appendChar(b, '\t') +} + +type repeatCount int + +func (n repeatCount) appendChar(b []byte, c byte) []byte { + for ; n > 0; n-- { + b = append(b, c) + } + return b +} + +// textNode is a simplified tree-based representation of structured text. +// Possible node types are textWrap, textList, or textLine. +type textNode interface { + // Len reports the length in bytes of a single-line version of the tree. + // Nested textRecord.Diff and textRecord.Comment fields are ignored. + Len() int + // Equal reports whether the two trees are structurally identical. + // Nested textRecord.Diff and textRecord.Comment fields are compared. + Equal(textNode) bool + // String returns the string representation of the text tree. + // It is not guaranteed that len(x.String()) == x.Len(), + // nor that x.String() == y.String() implies that x.Equal(y). + String() string + + // formatCompactTo formats the contents of the tree as a single-line string + // to the provided buffer. Any nested textRecord.Diff and textRecord.Comment + // fields are ignored. + // + // However, not all nodes in the tree should be collapsed as a single-line. + // If a node can be collapsed as a single-line, it is replaced by a textLine + // node. Since the top-level node cannot replace itself, this also returns + // the current node itself. + // + // This does not mutate the receiver. + formatCompactTo([]byte, diffMode) ([]byte, textNode) + // formatExpandedTo formats the contents of the tree as a multi-line string + // to the provided buffer. In order for column alignment to operate well, + // formatCompactTo must be called before calling formatExpandedTo. 
+ formatExpandedTo([]byte, diffMode, indentMode) []byte +} + +// textWrap is a wrapper that concatenates a prefix and/or a suffix +// to the underlying node. +type textWrap struct { + Prefix string // e.g., "bytes.Buffer{" + Value textNode // textWrap | textList | textLine + Suffix string // e.g., "}" +} + +func (s textWrap) Len() int { + return len(s.Prefix) + s.Value.Len() + len(s.Suffix) +} +func (s1 textWrap) Equal(s2 textNode) bool { + if s2, ok := s2.(textWrap); ok { + return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix + } + return false +} +func (s textWrap) String() string { + var d diffMode + var n indentMode + _, s2 := s.formatCompactTo(nil, d) + b := n.appendIndent(nil, d) // Leading indent + b = s2.formatExpandedTo(b, d, n) // Main body + b = append(b, '\n') // Trailing newline + return string(b) +} +func (s textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + n0 := len(b) // Original buffer length + b = append(b, s.Prefix...) + b, s.Value = s.Value.formatCompactTo(b, d) + b = append(b, s.Suffix...) + if _, ok := s.Value.(textLine); ok { + return b, textLine(b[n0:]) + } + return b, s +} +func (s textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { + b = append(b, s.Prefix...) + b = s.Value.formatExpandedTo(b, d, n) + b = append(b, s.Suffix...) + return b +} + +// textList is a comma-separated list of textWrap or textLine nodes. +// The list may be formatted as multi-lines or single-line at the discretion +// of the textList.formatCompactTo method. +type textList []textRecord +type textRecord struct { + Diff diffMode // e.g., 0 or '-' or '+' + Key string // e.g., "MyField" + Value textNode // textWrap | textLine + Comment fmt.Stringer // e.g., "6 identical fields" +} + +// AppendEllipsis appends a new ellipsis node to the list if none already +// exists at the end. If cs is non-zero it coalesces the statistics with the +// previous diffStats. 
+func (s *textList) AppendEllipsis(ds diffStats) { + hasStats := ds != diffStats{} + if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) { + if hasStats { + *s = append(*s, textRecord{Value: textEllipsis, Comment: ds}) + } else { + *s = append(*s, textRecord{Value: textEllipsis}) + } + return + } + if hasStats { + (*s)[len(*s)-1].Comment = (*s)[len(*s)-1].Comment.(diffStats).Append(ds) + } +} + +func (s textList) Len() (n int) { + for i, r := range s { + n += len(r.Key) + if r.Key != "" { + n += len(": ") + } + n += r.Value.Len() + if i < len(s)-1 { + n += len(", ") + } + } + return n +} + +func (s1 textList) Equal(s2 textNode) bool { + if s2, ok := s2.(textList); ok { + if len(s1) != len(s2) { + return false + } + for i := range s1 { + r1, r2 := s1[i], s2[i] + if !(r1.Diff == r2.Diff && r1.Key == r2.Key && r1.Value.Equal(r2.Value) && r1.Comment == r2.Comment) { + return false + } + } + return true + } + return false +} + +func (s textList) String() string { + return textWrap{"{", s, "}"}.String() +} + +func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + s = append(textList(nil), s...) // Avoid mutating original + + // Determine whether we can collapse this list as a single line. + n0 := len(b) // Original buffer length + var multiLine bool + for i, r := range s { + if r.Diff == diffInserted || r.Diff == diffRemoved { + multiLine = true + } + b = append(b, r.Key...) + if r.Key != "" { + b = append(b, ": "...) + } + b, s[i].Value = r.Value.formatCompactTo(b, d|r.Diff) + if _, ok := s[i].Value.(textLine); !ok { + multiLine = true + } + if r.Comment != nil { + multiLine = true + } + if i < len(s)-1 { + b = append(b, ", "...) + } + } + // Force multi-lined output when printing a removed/inserted node that + // is sufficiently long. 
+ if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > 80 { + multiLine = true + } + if !multiLine { + return b, textLine(b[n0:]) + } + return b, s +} + +func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { + alignKeyLens := s.alignLens( + func(r textRecord) bool { + _, isLine := r.Value.(textLine) + return r.Key == "" || !isLine + }, + func(r textRecord) int { return len(r.Key) }, + ) + alignValueLens := s.alignLens( + func(r textRecord) bool { + _, isLine := r.Value.(textLine) + return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil + }, + func(r textRecord) int { return len(r.Value.(textLine)) }, + ) + + // Format the list as a multi-lined output. + n++ + for i, r := range s { + b = n.appendIndent(append(b, '\n'), d|r.Diff) + if r.Key != "" { + b = append(b, r.Key+": "...) + } + b = alignKeyLens[i].appendChar(b, ' ') + + b = r.Value.formatExpandedTo(b, d|r.Diff, n) + if !r.Value.Equal(textEllipsis) { + b = append(b, ',') + } + b = alignValueLens[i].appendChar(b, ' ') + + if r.Comment != nil { + b = append(b, " // "+r.Comment.String()...) + } + } + n-- + + return n.appendIndent(append(b, '\n'), d) +} + +func (s textList) alignLens( + skipFunc func(textRecord) bool, + lenFunc func(textRecord) int, +) []repeatCount { + var startIdx, endIdx, maxLen int + lens := make([]repeatCount, len(s)) + for i, r := range s { + if skipFunc(r) { + for j := startIdx; j < endIdx && j < len(s); j++ { + lens[j] = repeatCount(maxLen - lenFunc(s[j])) + } + startIdx, endIdx, maxLen = i+1, i+1, 0 + } else { + if maxLen < lenFunc(r) { + maxLen = lenFunc(r) + } + endIdx = i + 1 + } + } + for j := startIdx; j < endIdx && j < len(s); j++ { + lens[j] = repeatCount(maxLen - lenFunc(s[j])) + } + return lens +} + +// textLine is a single-line segment of text and is always a leaf node +// in the textNode tree. 
+type textLine []byte + +var ( + textNil = textLine("nil") + textEllipsis = textLine("...") +) + +func (s textLine) Len() int { + return len(s) +} +func (s1 textLine) Equal(s2 textNode) bool { + if s2, ok := s2.(textLine); ok { + return bytes.Equal([]byte(s1), []byte(s2)) + } + return false +} +func (s textLine) String() string { + return string(s) +} +func (s textLine) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + return append(b, s...), s +} +func (s textLine) formatExpandedTo(b []byte, _ diffMode, _ indentMode) []byte { + return append(b, s...) +} + +type diffStats struct { + Name string + NumIgnored int + NumIdentical int + NumRemoved int + NumInserted int + NumModified int +} + +func (s diffStats) NumDiff() int { + return s.NumRemoved + s.NumInserted + s.NumModified +} + +func (s diffStats) Append(ds diffStats) diffStats { + assert(s.Name == ds.Name) + s.NumIgnored += ds.NumIgnored + s.NumIdentical += ds.NumIdentical + s.NumRemoved += ds.NumRemoved + s.NumInserted += ds.NumInserted + s.NumModified += ds.NumModified + return s +} + +// String prints a humanly-readable summary of coalesced records. +// +// Example: +// diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields" +func (s diffStats) String() string { + var ss []string + var sum int + labels := [...]string{"ignored", "identical", "removed", "inserted", "modified"} + counts := [...]int{s.NumIgnored, s.NumIdentical, s.NumRemoved, s.NumInserted, s.NumModified} + for i, n := range counts { + if n > 0 { + ss = append(ss, fmt.Sprintf("%d %v", n, labels[i])) + } + sum += n + } + + // Pluralize the name (adjusting for some obscure English grammar rules). + name := s.Name + if sum > 1 { + name += "s" + if strings.HasSuffix(name, "ys") { + name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries" + } + } + + // Format the list according to English grammar (with Oxford comma). 
+ switch n := len(ss); n { + case 0: + return "" + case 1, 2: + return strings.Join(ss, " and ") + " " + name + default: + return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name + } +} + +type commentString string + +func (s commentString) String() string { return string(s) } diff --git a/vendor/github.com/google/go-cmp/cmp/report_value.go b/vendor/github.com/google/go-cmp/cmp/report_value.go new file mode 100644 index 000000000..83031a7f5 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_value.go @@ -0,0 +1,121 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import "reflect" + +// valueNode represents a single node within a report, which is a +// structured representation of the value tree, containing information +// regarding which nodes are equal or not. +type valueNode struct { + parent *valueNode + + Type reflect.Type + ValueX reflect.Value + ValueY reflect.Value + + // NumSame is the number of leaf nodes that are equal. + // All descendants are equal only if NumDiff is 0. + NumSame int + // NumDiff is the number of leaf nodes that are not equal. + NumDiff int + // NumIgnored is the number of leaf nodes that are ignored. + NumIgnored int + // NumCompared is the number of leaf nodes that were compared + // using an Equal method or Comparer function. + NumCompared int + // NumTransformed is the number of non-leaf nodes that were transformed. + NumTransformed int + // NumChildren is the number of transitive descendants of this node. + // This counts from zero; thus, leaf nodes have no descendants. + NumChildren int + // MaxDepth is the maximum depth of the tree. This counts from zero; + // thus, leaf nodes have a depth of zero. + MaxDepth int + + // Records is a list of struct fields, slice elements, or map entries. 
+ Records []reportRecord // If populated, implies Value is not populated + + // Value is the result of a transformation, pointer indirect, of + // type assertion. + Value *valueNode // If populated, implies Records is not populated + + // TransformerName is the name of the transformer. + TransformerName string // If non-empty, implies Value is populated +} +type reportRecord struct { + Key reflect.Value // Invalid for slice element + Value *valueNode +} + +func (parent *valueNode) PushStep(ps PathStep) (child *valueNode) { + vx, vy := ps.Values() + child = &valueNode{parent: parent, Type: ps.Type(), ValueX: vx, ValueY: vy} + switch s := ps.(type) { + case StructField: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Key: reflect.ValueOf(s.Name()), Value: child}) + case SliceIndex: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Value: child}) + case MapIndex: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Key: s.Key(), Value: child}) + case Indirect: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + case TypeAssertion: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + case Transform: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + parent.TransformerName = s.Name() + parent.NumTransformed++ + default: + assert(parent == nil) // Must be the root step + } + return child +} + +func (r *valueNode) Report(rs Result) { + assert(r.MaxDepth == 0) // May only be called on leaf nodes + + if rs.ByIgnore() { + r.NumIgnored++ + } else { + if rs.Equal() { + r.NumSame++ + } else { + r.NumDiff++ + } + } + assert(r.NumSame+r.NumDiff+r.NumIgnored == 1) + + if rs.ByMethod() { + r.NumCompared++ + } + if rs.ByFunc() { + r.NumCompared++ + } + assert(r.NumCompared <= 1) +} + +func (child *valueNode) PopStep() (parent *valueNode) { + if child.parent == nil { + return nil + } + parent = 
child.parent + parent.NumSame += child.NumSame + parent.NumDiff += child.NumDiff + parent.NumIgnored += child.NumIgnored + parent.NumCompared += child.NumCompared + parent.NumTransformed += child.NumTransformed + parent.NumChildren += child.NumChildren + 1 + if parent.MaxDepth < child.MaxDepth+1 { + parent.MaxDepth = child.MaxDepth + 1 + } + return parent +} diff --git a/vendor/github.com/google/go-cmp/cmp/reporter.go b/vendor/github.com/google/go-cmp/cmp/reporter.go deleted file mode 100644 index 20e9f18e0..000000000 --- a/vendor/github.com/google/go-cmp/cmp/reporter.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -package cmp - -import ( - "fmt" - "reflect" - "strings" - - "github.com/google/go-cmp/cmp/internal/value" -) - -type defaultReporter struct { - Option - diffs []string // List of differences, possibly truncated - ndiffs int // Total number of differences - nbytes int // Number of bytes in diffs - nlines int // Number of lines in diffs -} - -var _ reporter = (*defaultReporter)(nil) - -func (r *defaultReporter) Report(x, y reflect.Value, eq bool, p Path) { - if eq { - return // Ignore equal results - } - const maxBytes = 4096 - const maxLines = 256 - r.ndiffs++ - if r.nbytes < maxBytes && r.nlines < maxLines { - sx := value.Format(x, value.FormatConfig{UseStringer: true}) - sy := value.Format(y, value.FormatConfig{UseStringer: true}) - if sx == sy { - // Unhelpful output, so use more exact formatting. 
- sx = value.Format(x, value.FormatConfig{PrintPrimitiveType: true}) - sy = value.Format(y, value.FormatConfig{PrintPrimitiveType: true}) - } - s := fmt.Sprintf("%#v:\n\t-: %s\n\t+: %s\n", p, sx, sy) - r.diffs = append(r.diffs, s) - r.nbytes += len(s) - r.nlines += strings.Count(s, "\n") - } -} - -func (r *defaultReporter) String() string { - s := strings.Join(r.diffs, "") - if r.ndiffs == len(r.diffs) { - return s - } - return fmt.Sprintf("%s... %d more differences ...", s, r.ndiffs-len(r.diffs)) -} diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml new file mode 100644 index 000000000..d8156a60b --- /dev/null +++ b/vendor/github.com/google/uuid/.travis.yml @@ -0,0 +1,9 @@ +language: go + +go: + - 1.4.3 + - 1.5.3 + - tip + +script: + - go test -v ./... diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md new file mode 100644 index 000000000..04fdf09f1 --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -0,0 +1,10 @@ +# How to contribute + +We definitely welcome patches and contribution to this project! + +### Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://cla.developers.google.com/clas). + +You may have already signed it for other Google projects. diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS new file mode 100644 index 000000000..b4bb97f6b --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTORS @@ -0,0 +1,9 @@ +Paul Borman +bmatsuo +shawnps +theory +jboverfelt +dsymonds +cd1 +wallclockbuilder +dansouza diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE new file mode 100644 index 000000000..5dc68268d --- /dev/null +++ b/vendor/github.com/google/uuid/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009,2014 Google Inc. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md new file mode 100644 index 000000000..9d92c11f1 --- /dev/null +++ b/vendor/github.com/google/uuid/README.md @@ -0,0 +1,19 @@ +# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) +The uuid package generates and inspects UUIDs based on +[RFC 4122](http://tools.ietf.org/html/rfc4122) +and DCE 1.1: Authentication and Security Services. 
+ +This package is based on the github.com/pborman/uuid package (previously named +code.google.com/p/go-uuid). It differs from these earlier packages in that +a UUID is a 16 byte array rather than a byte slice. One loss due to this +change is the ability to represent an invalid UUID (vs a NIL UUID). + +###### Install +`go get github.com/google/uuid` + +###### Documentation +[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) + +Full `go doc` style documentation for the package can be viewed online without +installing this package by using the GoDoc site here: +http://godoc.org/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go new file mode 100644 index 000000000..fa820b9d3 --- /dev/null +++ b/vendor/github.com/google/uuid/dce.go @@ -0,0 +1,80 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "fmt" + "os" +) + +// A Domain represents a Version 2 domain +type Domain byte + +// Domain constants for DCE Security (Version 2) UUIDs. +const ( + Person = Domain(0) + Group = Domain(1) + Org = Domain(2) +) + +// NewDCESecurity returns a DCE Security (Version 2) UUID. +// +// The domain should be one of Person, Group or Org. +// On a POSIX system the id should be the users UID for the Person +// domain and the users GID for the Group. The meaning of id for +// the domain Org or on non-POSIX systems is site defined. +// +// For a given domain/id pair the same token may be returned for up to +// 7 minutes and 10 seconds. 
+func NewDCESecurity(domain Domain, id uint32) (UUID, error) { + uuid, err := NewUUID() + if err == nil { + uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 + uuid[9] = byte(domain) + binary.BigEndian.PutUint32(uuid[0:], id) + } + return uuid, err +} + +// NewDCEPerson returns a DCE Security (Version 2) UUID in the person +// domain with the id returned by os.Getuid. +// +// NewDCESecurity(Person, uint32(os.Getuid())) +func NewDCEPerson() (UUID, error) { + return NewDCESecurity(Person, uint32(os.Getuid())) +} + +// NewDCEGroup returns a DCE Security (Version 2) UUID in the group +// domain with the id returned by os.Getgid. +// +// NewDCESecurity(Group, uint32(os.Getgid())) +func NewDCEGroup() (UUID, error) { + return NewDCESecurity(Group, uint32(os.Getgid())) +} + +// Domain returns the domain for a Version 2 UUID. Domains are only defined +// for Version 2 UUIDs. +func (uuid UUID) Domain() Domain { + return Domain(uuid[9]) +} + +// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 +// UUIDs. +func (uuid UUID) ID() uint32 { + return binary.BigEndian.Uint32(uuid[0:4]) +} + +func (d Domain) String() string { + switch d { + case Person: + return "Person" + case Group: + return "Group" + case Org: + return "Org" + } + return fmt.Sprintf("Domain%d", int(d)) +} diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go new file mode 100644 index 000000000..5b8a4b9af --- /dev/null +++ b/vendor/github.com/google/uuid/doc.go @@ -0,0 +1,12 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uuid generates and inspects UUIDs. +// +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security +// Services. +// +// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to +// maps or compared directly. 
+package uuid diff --git a/vendor/github.com/google/uuid/go.mod b/vendor/github.com/google/uuid/go.mod new file mode 100644 index 000000000..fc84cd79d --- /dev/null +++ b/vendor/github.com/google/uuid/go.mod @@ -0,0 +1 @@ +module github.com/google/uuid diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go new file mode 100644 index 000000000..b17461631 --- /dev/null +++ b/vendor/github.com/google/uuid/hash.go @@ -0,0 +1,53 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "crypto/md5" + "crypto/sha1" + "hash" +) + +// Well known namespace IDs and UUIDs +var ( + NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) + Nil UUID // empty UUID, all zeros +) + +// NewHash returns a new UUID derived from the hash of space concatenated with +// data generated by h. The hash should be at least 16 byte in length. The +// first 16 bytes of the hash are used to form the UUID. The version of the +// UUID will be the lower 4 bits of version. NewHash is used to implement +// NewMD5 and NewSHA1. +func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space[:]) + h.Write(data) + s := h.Sum(nil) + var uuid UUID + copy(uuid[:], s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. 
It is the same as calling: +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go new file mode 100644 index 000000000..7f9e0c6c0 --- /dev/null +++ b/vendor/github.com/google/uuid/marshal.go @@ -0,0 +1,37 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "fmt" + +// MarshalText implements encoding.TextMarshaler. +func (uuid UUID) MarshalText() ([]byte, error) { + var js [36]byte + encodeHex(js[:], uuid) + return js[:], nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (uuid *UUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err == nil { + *uuid = id + } + return err +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (uuid UUID) MarshalBinary() ([]byte, error) { + return uuid[:], nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (uuid *UUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(uuid[:], data) + return nil +} diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go new file mode 100644 index 000000000..d651a2b06 --- /dev/null +++ b/vendor/github.com/google/uuid/node.go @@ -0,0 +1,90 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uuid + +import ( + "sync" +) + +var ( + nodeMu sync.Mutex + ifname string // name of interface being used + nodeID [6]byte // hardware for version 1 UUIDs + zeroID [6]byte // nodeID with only 0's +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + defer nodeMu.Unlock() + nodeMu.Lock() + return ifname +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". +func SetNodeInterface(name string) bool { + defer nodeMu.Unlock() + nodeMu.Lock() + return setNodeInterface(name) +} + +func setNodeInterface(name string) bool { + iname, addr := getHardwareInterface(name) // null implementation for js + if iname != "" && addr != nil { + ifname = iname + copy(nodeID[:], addr) + return true + } + + // We found no interfaces with a valid hardware address. If name + // does not specify a specific interface generate a random Node ID + // (section 4.1.6) + if name == "" { + ifname = "random" + randomBits(nodeID[:]) + return true + } + return false +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + defer nodeMu.Unlock() + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + nid := nodeID + return nid[:] +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. 
+func SetNodeID(id []byte) bool { + if len(id) < 6 { + return false + } + defer nodeMu.Unlock() + nodeMu.Lock() + copy(nodeID[:], id) + ifname = "user" + return true +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) NodeID() []byte { + var node [6]byte + copy(node[:], uuid[10:]) + return node[:] +} diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go new file mode 100644 index 000000000..24b78edc9 --- /dev/null +++ b/vendor/github.com/google/uuid/node_js.go @@ -0,0 +1,12 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build js + +package uuid + +// getHardwareInterface returns nil values for the JS version of the code. +// This remvoves the "net" dependency, because it is not used in the browser. +// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. +func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go new file mode 100644 index 000000000..0cbbcddbd --- /dev/null +++ b/vendor/github.com/google/uuid/node_net.go @@ -0,0 +1,33 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !js + +package uuid + +import "net" + +var interfaces []net.Interface // cached list of interfaces + +// getHardwareInterface returns the name and hardware address of interface name. +// If name is "" then the name and hardware address of one of the system's +// interfaces is returned. If no interfaces are found (name does not exist or +// there are no interfaces) then "", nil is returned. 
+// +// Only addresses of at least 6 bytes are returned. +func getHardwareInterface(name string) (string, []byte) { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil { + return "", nil + } + } + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + return ifs.Name, ifs.HardwareAddr + } + } + return "", nil +} diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go new file mode 100644 index 000000000..f326b54db --- /dev/null +++ b/vendor/github.com/google/uuid/sql.go @@ -0,0 +1,59 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements sql.Scanner so UUIDs can be read from databases transparently +// Currently, database types that map to string and []byte are supported. Please +// consult database-specific driver documentation for matching types. +func (uuid *UUID) Scan(src interface{}) error { + switch src := src.(type) { + case nil: + return nil + + case string: + // if an empty UUID comes from a table, we return a null UUID + if src == "" { + return nil + } + + // see Parse for required string format + u, err := Parse(src) + if err != nil { + return fmt.Errorf("Scan: %v", err) + } + + *uuid = u + + case []byte: + // if an empty UUID comes from a table, we return a null UUID + if len(src) == 0 { + return nil + } + + // assumes a simple slice of bytes if 16 bytes + // otherwise attempts to parse + if len(src) != 16 { + return uuid.Scan(string(src)) + } + copy((*uuid)[:], src) + + default: + return fmt.Errorf("Scan: unable to scan type %T into UUID", src) + } + + return nil +} + +// Value implements sql.Valuer so that UUIDs can be written to databases +// transparently. Currently, UUIDs map to strings. 
Please consult +// database-specific driver documentation for matching types. +func (uuid UUID) Value() (driver.Value, error) { + return uuid.String(), nil +} diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go new file mode 100644 index 000000000..e6ef06cdc --- /dev/null +++ b/vendor/github.com/google/uuid/time.go @@ -0,0 +1,123 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "sync" + "time" +) + +// A Time represents a time as the number of 100's of nanoseconds since 15 Oct +// 1582. +type Time int64 + +const ( + lillian = 2299160 // Julian day of 15 Oct 1582 + unix = 2440587 // Julian day of 1 Jan 1970 + epoch = unix - lillian // Days between epochs + g1582 = epoch * 86400 // seconds between epochs + g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs +) + +var ( + timeMu sync.Mutex + lasttime uint64 // last time we returned + clockSeq uint16 // clock sequence for this run + + timeNow = time.Now // for testing +) + +// UnixTime converts t the number of seconds and nanoseconds using the Unix +// epoch of 1 Jan 1970. +func (t Time) UnixTime() (sec, nsec int64) { + sec = int64(t - g1582ns100) + nsec = (sec % 10000000) * 100 + sec /= 10000000 + return sec, nsec +} + +// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and +// clock sequence as well as adjusting the clock sequence as needed. An error +// is returned if the current time cannot be determined. +func GetTime() (Time, uint16, error) { + defer timeMu.Unlock() + timeMu.Lock() + return getTime() +} + +func getTime() (Time, uint16, error) { + t := timeNow() + + // If we don't have a clock sequence already, set one. 
+ if clockSeq == 0 { + setClockSequence(-1) + } + now := uint64(t.UnixNano()/100) + g1582ns100 + + // If time has gone backwards with this clock sequence then we + // increment the clock sequence + if now <= lasttime { + clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 + } + lasttime = now + return Time(now), clockSeq, nil +} + +// ClockSequence returns the current clock sequence, generating one if not +// already set. The clock sequence is only used for Version 1 UUIDs. +// +// The uuid package does not use global static storage for the clock sequence or +// the last time a UUID was generated. Unless SetClockSequence is used, a new +// random clock sequence is generated the first time a clock sequence is +// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) +func ClockSequence() int { + defer timeMu.Unlock() + timeMu.Lock() + return clockSequence() +} + +func clockSequence() int { + if clockSeq == 0 { + setClockSequence(-1) + } + return int(clockSeq & 0x3fff) +} + +// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to +// -1 causes a new sequence to be generated. +func SetClockSequence(seq int) { + defer timeMu.Unlock() + timeMu.Lock() + setClockSequence(seq) +} + +func setClockSequence(seq int) { + if seq == -1 { + var b [2]byte + randomBits(b[:]) // clock sequence + seq = int(b[0])<<8 | int(b[1]) + } + oldSeq := clockSeq + clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant + if oldSeq != clockSeq { + lasttime = 0 + } +} + +// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in +// uuid. The time is only defined for version 1 and 2 UUIDs. +func (uuid UUID) Time() Time { + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + return Time(time) +} + +// ClockSequence returns the clock sequence encoded in uuid. 
+// The clock sequence is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) ClockSequence() int { + return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff +} diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go new file mode 100644 index 000000000..5ea6c7378 --- /dev/null +++ b/vendor/github.com/google/uuid/util.go @@ -0,0 +1,43 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// randomBits completely fills slice b with random data. +func randomBits(b []byte) { + if _, err := io.ReadFull(rander, b); err != nil { + panic(err.Error()) // rand should never fail + } +} + +// xvalues returns the value of a byte as a hexadecimal digit or 255. +var xvalues = [256]byte{ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +} + +// xtob converts hex characters x1 and x2 into a byte. +func xtob(x1, x2 byte) (byte, bool) { + b1 := xvalues[x1] + b2 := xvalues[x2] + return (b1 << 4) | b2, b1 != 255 && b2 != 255 +} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go new file mode 100644 index 000000000..524404cc5 --- /dev/null +++ b/vendor/github.com/google/uuid/uuid.go @@ -0,0 +1,245 @@ +// Copyright 2018 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "io" + "strings" +) + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type UUID [16]byte + +// A Version represents a UUID's version. +type Version byte + +// A Variant represents a UUID's variant. +type Variant byte + +// Constants returned by Variant. +const ( + Invalid = Variant(iota) // Invalid UUID + RFC4122 // The variant specified in RFC4122 + Reserved // Reserved, NCS backward compatibility. + Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future // Reserved for future definition. +) + +var rander = rand.Reader // random function + +// Parse decodes s into a UUID or returns an error. Both the standard UUID +// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the +// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex +// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. 
+func Parse(s string) (UUID, error) { + var uuid UUID + switch len(s) { + // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36: + + // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: + if strings.ToLower(s[:9]) != "urn:uuid:" { + return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + case 36 + 2: + s = s[1:] + + // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + case 32: + var ok bool + for i := range uuid { + uuid[i], ok = xtob(s[i*2], s[i*2+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, fmt.Errorf("invalid UUID length: %d", len(s)) + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(s[x], s[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// ParseBytes is like Parse, except it parses a byte slice instead of a string. 
+func ParseBytes(b []byte) (UUID, error) { + var uuid UUID + switch len(b) { + case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { + return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) + } + b = b[9:] + case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + b = b[1:] + case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + var ok bool + for i := 0; i < 32; i += 2 { + uuid[i/2], ok = xtob(b[i], b[i+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, fmt.Errorf("invalid UUID length: %d", len(b)) + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(b[x], b[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// MustParse is like Parse but panics if the string cannot be parsed. +// It simplifies safe initialization of global variables holding compiled UUIDs. +func MustParse(s string) UUID { + uuid, err := Parse(s) + if err != nil { + panic(`uuid: Parse(` + s + `): ` + err.Error()) + } + return uuid +} + +// FromBytes creates a new UUID from a byte slice. Returns an error if the slice +// does not have a length of 16. The bytes are copied from the slice. +func FromBytes(b []byte) (uuid UUID, err error) { + err = uuid.UnmarshalBinary(b) + return uuid, err +} + +// Must returns uuid if err is nil and panics otherwise. +func Must(uuid UUID, err error) UUID { + if err != nil { + panic(err) + } + return uuid +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. 
+func (uuid UUID) String() string { + var buf [36]byte + encodeHex(buf[:], uuid) + return string(buf[:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. +func (uuid UUID) URN() string { + var buf [36 + 9]byte + copy(buf[:], "urn:uuid:") + encodeHex(buf[9:], uuid) + return string(buf[:]) +} + +func encodeHex(dst []byte, uuid UUID) { + hex.Encode(dst, uuid[:4]) + dst[8] = '-' + hex.Encode(dst[9:13], uuid[4:6]) + dst[13] = '-' + hex.Encode(dst[14:18], uuid[6:8]) + dst[18] = '-' + hex.Encode(dst[19:23], uuid[8:10]) + dst[23] = '-' + hex.Encode(dst[24:], uuid[10:]) +} + +// Variant returns the variant encoded in uuid. +func (uuid UUID) Variant() Variant { + switch { + case (uuid[8] & 0xc0) == 0x80: + return RFC4122 + case (uuid[8] & 0xe0) == 0xc0: + return Microsoft + case (uuid[8] & 0xe0) == 0xe0: + return Future + default: + return Reserved + } +} + +// Version returns the version of uuid. +func (uuid UUID) Version() Version { + return Version(uuid[6] >> 4) +} + +func (v Version) String() string { + if v > 15 { + return fmt.Sprintf("BAD_VERSION_%d", v) + } + return fmt.Sprintf("VERSION_%d", v) +} + +func (v Variant) String() string { + switch v { + case RFC4122: + return "RFC4122" + case Reserved: + return "Reserved" + case Microsoft: + return "Microsoft" + case Future: + return "Future" + case Invalid: + return "Invalid" + } + return fmt.Sprintf("BadVariant%d", int(v)) +} + +// SetRand sets the random number generator to r, which implements io.Reader. +// If r.Read returns an error when the package requests random data then +// a panic will be issued. +// +// Calling SetRand with nil sets the random number generator to the default +// generator. 
+func SetRand(r io.Reader) { + if r == nil { + rander = rand.Reader + return + } + rander = r +} diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go new file mode 100644 index 000000000..199a1ac65 --- /dev/null +++ b/vendor/github.com/google/uuid/version1.go @@ -0,0 +1,44 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" +) + +// NewUUID returns a Version 1 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewUUID returns nil. If clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current NewUUID returns nil and an error. +// +// In most cases, New should be used. +func NewUUID() (UUID, error) { + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + nodeMu.Unlock() + + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + timeLow := uint32(now & 0xffffffff) + timeMid := uint16((now >> 32) & 0xffff) + timeHi := uint16((now >> 48) & 0x0fff) + timeHi |= 0x1000 // Version 1 + + binary.BigEndian.PutUint32(uuid[0:], timeLow) + binary.BigEndian.PutUint16(uuid[4:], timeMid) + binary.BigEndian.PutUint16(uuid[6:], timeHi) + binary.BigEndian.PutUint16(uuid[8:], seq) + copy(uuid[10:], nodeID[:]) + + return uuid, nil +} diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go new file mode 100644 index 000000000..84af91c9f --- /dev/null +++ b/vendor/github.com/google/uuid/version4.go @@ -0,0 +1,38 @@ +// Copyright 2016 Google Inc. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "io" + +// New creates a new random UUID or panics. New is equivalent to +// the expression +// +// uuid.Must(uuid.NewRandom()) +func New() UUID { + return Must(NewRandom()) +} + +// NewRandom returns a Random (Version 4) UUID. +// +// The strength of the UUIDs is based on the strength of the crypto/rand +// package. +// +// A note about uniqueness derived from the UUID Wikipedia entry: +// +// Randomly generated UUIDs have 122 random bits. One's annual risk of being +// hit by a meteorite is estimated to be one chance in 17 billion, that +// means the probability is about 0.00000000006 (6 × 10−11), +// equivalent to the odds of creating a few tens of trillions of UUIDs in a +// year and having one duplicate. +func NewRandom() (UUID, error) { + var uuid UUID + _, err := io.ReadFull(rander, uuid[:]) + if err != nil { + return Nil, err + } + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} diff --git a/vendor/github.com/knative/pkg/apis/OWNERS b/vendor/github.com/knative/pkg/apis/OWNERS deleted file mode 100644 index 887e8bc87..000000000 --- a/vendor/github.com/knative/pkg/apis/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -# The OWNERS file is used by prow to automatically merge approved PRs. - -approvers: -- mattmoor -- vaikas-google -- n3wscott diff --git a/vendor/github.com/knative/pkg/apis/contexts.go b/vendor/github.com/knative/pkg/apis/contexts.go deleted file mode 100644 index 3a775b8fc..000000000 --- a/vendor/github.com/knative/pkg/apis/contexts.go +++ /dev/null @@ -1,77 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apis - -import ( - "context" - - authenticationv1 "k8s.io/api/authentication/v1" -) - -// This is attached to contexts passed to webhook interfaces when -// the receiver being validated is being created. -type inCreateKey struct{} - -// WithinCreate is used to note that the webhook is calling within -// the context of a Create operation. -func WithinCreate(ctx context.Context) context.Context { - return context.WithValue(ctx, inCreateKey{}, struct{}{}) -} - -// IsInCreate checks whether the context is a Create. -func IsInCreate(ctx context.Context) bool { - return ctx.Value(inCreateKey{}) != nil -} - -// This is attached to contexts passed to webhook interfaces when -// the receiver being validated is being updated. -type inUpdateKey struct{} - -// WithinUpdate is used to note that the webhook is calling within -// the context of a Update operation. -func WithinUpdate(ctx context.Context, base interface{}) context.Context { - return context.WithValue(ctx, inUpdateKey{}, base) -} - -// IsInUpdate checks whether the context is an Update. -func IsInUpdate(ctx context.Context) bool { - return ctx.Value(inUpdateKey{}) != nil -} - -// GetBaseline returns the baseline of the update, or nil when we -// are not within an update context. -func GetBaseline(ctx context.Context) interface{} { - return ctx.Value(inUpdateKey{}) -} - -// This is attached to contexts passed to webhook interfaces when -// the receiver being validated is being created. 
-type userInfoKey struct{} - -// WithUserInfo is used to note that the webhook is calling within -// the context of a Create operation. -func WithUserInfo(ctx context.Context, ui *authenticationv1.UserInfo) context.Context { - return context.WithValue(ctx, userInfoKey{}, ui) -} - -// GetUserInfo accesses the UserInfo attached to the webhook context. -func GetUserInfo(ctx context.Context) *authenticationv1.UserInfo { - if ui, ok := ctx.Value(userInfoKey{}).(*authenticationv1.UserInfo); ok { - return ui - } - return nil -} diff --git a/vendor/github.com/knative/pkg/controller/OWNERS b/vendor/github.com/knative/pkg/controller/OWNERS deleted file mode 100644 index 39dc9812b..000000000 --- a/vendor/github.com/knative/pkg/controller/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -# The OWNERS file is used by prow to automatically merge approved PRs. - -approvers: -- mattmoor -- grantr -- tcnghia diff --git a/vendor/github.com/knative/pkg/logging/OWNERS b/vendor/github.com/knative/pkg/logging/OWNERS deleted file mode 100644 index f51b91be7..000000000 --- a/vendor/github.com/knative/pkg/logging/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -# The OWNERS file is used by prow to automatically merge approved PRs. - -approvers: -- mdemirhan -- n3wscott -- yanweiguo diff --git a/vendor/github.com/knative/pkg/test/OWNERS b/vendor/github.com/knative/pkg/test/OWNERS deleted file mode 100644 index ed29d4015..000000000 --- a/vendor/github.com/knative/pkg/test/OWNERS +++ /dev/null @@ -1,7 +0,0 @@ -# The OWNERS file is used by prow to automatically merge approved PRs. - -approvers: -- adrcunha -- jessiezcc -- srinivashegde86 -- steuhs diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go index ba591d4fe..c6d5e8f4f 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors. 
+Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,6 +19,7 @@ package config import ( "fmt" "strconv" + "time" corev1 "k8s.io/api/core/v1" ) @@ -27,6 +28,7 @@ const ( // ConfigName is the name of the configmap DefaultsConfigName = "config-defaults" DefaultTimeoutMinutes = 60 + NoTimeoutDuration = 0 * time.Minute defaultTimeoutMinutesKey = "default-timeout-minutes" ) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/store.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/store.go index d0e0e0c7e..c257b0f33 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/store.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/store.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors. +Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,7 +19,7 @@ package config import ( "context" - "github.com/knative/pkg/configmap" + "knative.dev/pkg/configmap" ) type cfgKey struct{} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/zz_generated.deepcopy.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/zz_generated.deepcopy.go index b9be3f53a..8abb6b777 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/zz_generated.deepcopy.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/zz_generated.deepcopy.go @@ -16,7 +16,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file was autogenerated by deepcopy-gen. Do not edit it manually! +// Code generated by deepcopy-gen. DO NOT EDIT. 
package config diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/register.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/register.go index 45ad33835..012b6fec5 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/register.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/register.go @@ -16,12 +16,25 @@ limitations under the License. package pipeline -// GroupName is the Kubernetes resource group name for Pipeline types. const ( - GroupName = "tekton.dev" - TaskLabelKey = "/task" - TaskRunLabelKey = "/taskRun" - PipelineLabelKey = "/pipeline" - PipelineRunLabelKey = "/pipelineRun" + // GroupName is the Kubernetes resource group name for Pipeline types. + GroupName = "tekton.dev" + + // TaskLabelKey is used as the label identifier for a task + TaskLabelKey = "/task" + + // TaskRunLabelKey is used as the label identifier for a TaskRun + TaskRunLabelKey = "/taskRun" + + // PipelineLabelKey is used as the label identifier for a Pipeline + PipelineLabelKey = "/pipeline" + + // PipelineRunLabelKey is used as the label identifier for a PipelineRun + PipelineRunLabelKey = "/pipelineRun" + + // PipelineRunLabelKey is used as the label identifier for a PipelineTask PipelineTaskLabelKey = "/pipelineTask" + + // ConditionCheck is used as the label identifier for a ConditionCheck + ConditionCheckKey = "/conditionCheck" ) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/artifact_bucket.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/artifact_bucket.go index b243f801e..99e576f24 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/artifact_bucket.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/artifact_bucket.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors. +Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -53,9 +53,7 @@ const ( ArtifactStoragePVCType = "pvc" ) -var ( - secretVolumeMountPath = "/var/bucketsecret" -) +var secretVolumeMountPath = "/var/bucketsecret" // ArtifactBucket contains the Storage bucket configuration defined in the // Bucket config map. @@ -75,43 +73,43 @@ func (b *ArtifactBucket) StorageBasePath(pr *PipelineRun) string { return fmt.Sprintf("%s-%s-bucket", pr.Name, pr.Namespace) } -// GetCopyFromStorageToContainerSpec returns a container used to download artifacts from temporary storage -func (b *ArtifactBucket) GetCopyFromStorageToContainerSpec(name, sourcePath, destinationPath string) []corev1.Container { +// GetCopyFromStorageToSteps returns a container used to download artifacts from temporary storage +func (b *ArtifactBucket) GetCopyFromStorageToSteps(name, sourcePath, destinationPath string) []Step { args := []string{"-args", fmt.Sprintf("cp -P -r %s %s", fmt.Sprintf("%s/%s/*", b.Location, sourcePath), destinationPath)} envVars, secretVolumeMount := getSecretEnvVarsAndVolumeMounts("bucket", secretVolumeMountPath, b.Secrets) - return []corev1.Container{{ + return []Step{{Container: corev1.Container{ Name: names.SimpleNameGenerator.RestrictLengthWithRandomSuffix(fmt.Sprintf("artifact-dest-mkdir-%s", name)), Image: *BashNoopImage, Command: []string{"/ko-app/bash"}, Args: []string{ "-args", strings.Join([]string{"mkdir", "-p", destinationPath}, " "), }, - }, { + }}, {Container: corev1.Container{ Name: names.SimpleNameGenerator.RestrictLengthWithRandomSuffix(fmt.Sprintf("artifact-copy-from-%s", name)), Image: *gsutilImage, Command: []string{"/ko-app/gsutil"}, Args: args, Env: envVars, VolumeMounts: secretVolumeMount, - }} + }}} } -// GetCopyToStorageFromContainerSpec returns a container used to upload artifacts for temporary storage -func (b *ArtifactBucket) GetCopyToStorageFromContainerSpec(name, sourcePath, destinationPath string) []corev1.Container { +// GetCopyToStorageFromSteps returns a container used to upload artifacts for temporary 
storage +func (b *ArtifactBucket) GetCopyToStorageFromSteps(name, sourcePath, destinationPath string) []Step { args := []string{"-args", fmt.Sprintf("cp -P -r %s %s", sourcePath, fmt.Sprintf("%s/%s", b.Location, destinationPath))} envVars, secretVolumeMount := getSecretEnvVarsAndVolumeMounts("bucket", secretVolumeMountPath, b.Secrets) - return []corev1.Container{{ + return []Step{{Container: corev1.Container{ Name: names.SimpleNameGenerator.RestrictLengthWithRandomSuffix(fmt.Sprintf("artifact-copy-to-%s", name)), Image: *gsutilImage, Command: []string{"/ko-app/gsutil"}, Args: args, Env: envVars, VolumeMounts: secretVolumeMount, - }} + }}} } // GetSecretsVolumes returns the list of volumes for secrets to be mounted diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/artifact_pvc.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/artifact_pvc.go index 484e70cdb..3d90afa50 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/artifact_pvc.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/artifact_pvc.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors. +Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -47,28 +47,27 @@ func (p *ArtifactPVC) StorageBasePath(pr *PipelineRun) string { return pvcDir } -// GetCopyFromStorageToContainerSpec returns a container used to download artifacts from temporary storage -func (p *ArtifactPVC) GetCopyFromStorageToContainerSpec(name, sourcePath, destinationPath string) []corev1.Container { - return []corev1.Container{{ +// GetCopyFromStorageToSteps returns a container used to download artifacts from temporary storage +func (p *ArtifactPVC) GetCopyFromStorageToSteps(name, sourcePath, destinationPath string) []Step { + return []Step{{Container: corev1.Container{ Name: names.SimpleNameGenerator.RestrictLengthWithRandomSuffix(fmt.Sprintf("source-copy-%s", name)), Image: *BashNoopImage, Command: []string{"/ko-app/bash"}, Args: []string{"-args", strings.Join([]string{"cp", "-r", fmt.Sprintf("%s/.", sourcePath), destinationPath}, " ")}, - }} + }}} } -// GetCopyToStorageFromContainerSpec returns a container used to upload artifacts for temporary storage -func (p *ArtifactPVC) GetCopyToStorageFromContainerSpec(name, sourcePath, destinationPath string) []corev1.Container { - return []corev1.Container{{ +// GetCopyToStorageFromSteps returns a container used to upload artifacts for temporary storage +func (p *ArtifactPVC) GetCopyToStorageFromSteps(name, sourcePath, destinationPath string) []Step { + return []Step{{Container: corev1.Container{ Name: names.SimpleNameGenerator.RestrictLengthWithRandomSuffix(fmt.Sprintf("source-mkdir-%s", name)), Image: *BashNoopImage, Command: []string{"/ko-app/bash"}, Args: []string{ - "-args", strings.Join([]string{"mkdir", "-p", destinationPath}, " "), }, VolumeMounts: []corev1.VolumeMount{GetPvcMount(p.Name)}, - }, { + }}, {Container: corev1.Container{ Name: names.SimpleNameGenerator.RestrictLengthWithRandomSuffix(fmt.Sprintf("source-copy-%s", name)), Image: *BashNoopImage, Command: []string{"/ko-app/bash"}, @@ -76,7 +75,7 @@ func (p *ArtifactPVC) GetCopyToStorageFromContainerSpec(name, sourcePath, destin 
"-args", strings.Join([]string{"cp", "-r", fmt.Sprintf("%s/.", sourcePath), destinationPath}, " "), }, VolumeMounts: []corev1.VolumeMount{GetPvcMount(p.Name)}, - }} + }}} } // GetPvcMount returns a mounting of the volume with the mount path /pvc @@ -87,18 +86,16 @@ func GetPvcMount(name string) corev1.VolumeMount { } } -// CreateDirContainer returns a container step to create a dir -func CreateDirContainer(name, destinationPath string) corev1.Container { - return corev1.Container{ - Name: names.SimpleNameGenerator.RestrictLengthWithRandomSuffix(fmt.Sprintf("create-dir-%s", name)), +// CreateDirStep returns a container step to create a dir +func CreateDirStep(name, destinationPath string) Step { + return Step{Container: corev1.Container{ + Name: names.SimpleNameGenerator.RestrictLengthWithRandomSuffix(fmt.Sprintf("create-dir-%s", strings.ToLower(name))), Image: *BashNoopImage, Command: []string{"/ko-app/bash"}, Args: []string{"-args", strings.Join([]string{"mkdir", "-p", destinationPath}, " ")}, - } + }} } -// GetSecretsVolumes returns the list of volumes for secrets to be mounted -// on pod -func (p *ArtifactPVC) GetSecretsVolumes() []corev1.Volume { - return nil -} +// GetSecretsVolumes returns the list of volumes for secrets to be mounted on +// pod. +func (p *ArtifactPVC) GetSecretsVolumes() []corev1.Volume { return nil } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/build_gcs_resource.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/build_gcs_resource.go index 0dd6782b6..99dcbda24 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/build_gcs_resource.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/build_gcs_resource.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors. +Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -53,11 +53,10 @@ const ( // BuildGCSResource does incremental uploads for files in directory. type BuildGCSResource struct { - Name string - Type PipelineResourceType - Location string - DestinationDir string - ArtifactType GCSArtifactType + Name string + Type PipelineResourceType + Location string + ArtifactType GCSArtifactType } // NewBuildGCSResource creates a new BuildGCS resource to pass to a Task @@ -116,48 +115,42 @@ func (s *BuildGCSResource) Replacements() map[string]string { "name": s.Name, "type": string(s.Type), "location": s.Location, - "path": s.DestinationDir, } } -// SetDestinationDirectory sets the destination directory at runtime like where is the resource going to be copied to -func (s *BuildGCSResource) SetDestinationDirectory(destDir string) { s.DestinationDir = destDir } - -// GetDownloadContainerSpec returns an array of container specs to download gcs storage object -func (s *BuildGCSResource) GetDownloadContainerSpec() ([]corev1.Container, error) { - if s.DestinationDir == "" { - return nil, xerrors.Errorf("BuildGCSResource: Expect Destination Directory param to be set %s", s.Name) - } +// GetDownloadSteps returns an array of container specs to download gcs storage object +func (s *BuildGCSResource) GetDownloadSteps(sourcePath string) ([]Step, error) { args := []string{"--type", string(s.ArtifactType), "--location", s.Location} // dest_dir is the destination directory for GCS files to be copies" - if s.DestinationDir != "" { - args = append(args, "--dest_dir", s.DestinationDir) + if sourcePath != "" { + args = append(args, "--dest_dir", sourcePath) } - return []corev1.Container{ - CreateDirContainer(s.Name, s.DestinationDir), { + return []Step{ + CreateDirStep(s.Name, sourcePath), + {Container: corev1.Container{ Name: names.SimpleNameGenerator.RestrictLengthWithRandomSuffix(fmt.Sprintf("storage-fetch-%s", s.Name)), Image: *buildGCSFetcherImage, Args: args, - }}, nil + }}}, nil } -// GetUploadContainerSpec gets container spec for gcs 
resource to be uploaded like +// GetUploadSteps gets container spec for gcs resource to be uploaded like // set environment variable from secret params and set volume mounts for those secrets -func (s *BuildGCSResource) GetUploadContainerSpec() ([]corev1.Container, error) { +func (s *BuildGCSResource) GetUploadSteps(sourcePath string) ([]Step, error) { if s.ArtifactType != GCSManifest { return nil, xerrors.Errorf("BuildGCSResource: Can only upload Artifacts of type Manifest: %s", s.Name) } - if s.DestinationDir == "" { + if sourcePath == "" { return nil, xerrors.Errorf("BuildGCSResource: Expect Destination Directory param to be set %s", s.Name) } - args := []string{"--location", s.Location, "--dir", s.DestinationDir} + args := []string{"--location", s.Location, "--dir", sourcePath} - return []corev1.Container{{ + return []Step{{Container: corev1.Container{ Name: names.SimpleNameGenerator.RestrictLengthWithRandomSuffix(fmt.Sprintf("storage-upload-%s", s.Name)), Image: *buildGCSUploaderImage, Args: args, - }}, nil + }}}, nil } func getArtifactType(val string) (GCSArtifactType, error) { @@ -173,3 +166,11 @@ func getArtifactType(val string) (GCSArtifactType, error) { } return "", xerrors.Errorf("Invalid ArtifactType %s. Should be one of %s", aType, strings.Join(valid, ",")) } + +func (s *BuildGCSResource) GetUploadVolumeSpec(spec *TaskSpec) ([]corev1.Volume, error) { + return getStorageUploadVolumeSpec(s, spec) +} + +func (s *BuildGCSResource) GetDownloadVolumeSpec(spec *TaskSpec) ([]corev1.Volume, error) { + return getStorageUploadVolumeSpec(s, spec) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cloud_event_resource.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cloud_event_resource.go new file mode 100644 index 000000000..71d53d593 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cloud_event_resource.go @@ -0,0 +1,88 @@ +/* +Copyright 2019 The Tekton Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "fmt" + "strings" + + corev1 "k8s.io/api/core/v1" +) + +// CloudEventResource is an event sink to which events are delivered when a TaskRun has finished +type CloudEventResource struct { + // Name is the name used to reference to the PipelineResource + Name string `json:"name"` + // Type must be `PipelineResourceTypeCloudEvent` + Type PipelineResourceType `json:"type"` + // TargetURI is the URI of the sink which the cloud event is develired to + TargetURI string `json:"targetURI"` +} + +// NewCloudEventResource creates a new CloudEvent resource to pass to a Task +func NewCloudEventResource(r *PipelineResource) (*CloudEventResource, error) { + if r.Spec.Type != PipelineResourceTypeCloudEvent { + return nil, fmt.Errorf("CloudEventResource: Cannot create a Cloud Event resource from a %s Pipeline Resource", r.Spec.Type) + } + var targetURI string + var targetURISpecified bool + + for _, param := range r.Spec.Params { + switch { + case strings.EqualFold(param.Name, "TargetURI"): + targetURI = param.Value + if param.Value != "" { + targetURISpecified = true + } + } + } + + if !targetURISpecified { + return nil, fmt.Errorf("CloudEventResource: Need URI to be specified in order to create a CloudEvent resource %s", r.Name) + } + return &CloudEventResource{ + Name: r.Name, + Type: r.Spec.Type, + TargetURI: targetURI, + }, nil +} + +// GetName returns the name of the resource +func (s CloudEventResource) GetName() 
string { + return s.Name +} + +// GetType returns the type of the resource, in this case "cloudEvent" +func (s CloudEventResource) GetType() PipelineResourceType { + return PipelineResourceTypeCloudEvent +} + +// Replacements is used for template replacement on an CloudEventResource inside of a Taskrun. +func (s *CloudEventResource) Replacements() map[string]string { + return map[string]string{ + "name": s.Name, + "type": string(s.Type), + "target-uri": s.TargetURI, + } +} + +func (s *CloudEventResource) GetUploadSteps(string) ([]Step, error) { return nil, nil } +func (s *CloudEventResource) GetDownloadSteps(string) ([]Step, error) { return nil, nil } +func (s *CloudEventResource) GetUploadVolumeSpec(*TaskSpec) ([]corev1.Volume, error) { return nil, nil } +func (s *CloudEventResource) GetDownloadVolumeSpec(*TaskSpec) ([]corev1.Volume, error) { + return nil, nil +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_resource.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_resource.go index 70d9a2b3f..3ae7194f9 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_resource.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_resource.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors. +Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -138,13 +138,9 @@ func (s ClusterResource) String() string { return string(json) } -func (s *ClusterResource) GetUploadContainerSpec() ([]corev1.Container, error) { - return nil, nil -} +func (s *ClusterResource) GetUploadSteps(string) ([]Step, error) { return nil, nil } -func (s *ClusterResource) SetDestinationDirectory(path string) { -} -func (s *ClusterResource) GetDownloadContainerSpec() ([]corev1.Container, error) { +func (s *ClusterResource) GetDownloadSteps(sourcePath string) ([]Step, error) { var envVars []corev1.EnvVar for _, sec := range s.Secrets { ev := corev1.EnvVar{ @@ -160,8 +156,7 @@ func (s *ClusterResource) GetDownloadContainerSpec() ([]corev1.Container, error) } envVars = append(envVars, ev) } - - clusterContainer := corev1.Container{ + return []Step{{Container: corev1.Container{ Name: names.SimpleNameGenerator.RestrictLengthWithRandomSuffix("kubeconfig"), Image: *kubeconfigWriterImage, Command: []string{"/ko-app/kubeconfigwriter"}, @@ -169,7 +164,8 @@ func (s *ClusterResource) GetDownloadContainerSpec() ([]corev1.Container, error) "-clusterConfig", s.String(), }, Env: envVars, - } - - return []corev1.Container{clusterContainer}, nil + }}}, nil } + +func (s *ClusterResource) GetUploadVolumeSpec(*TaskSpec) ([]corev1.Volume, error) { return nil, nil } +func (s *ClusterResource) GetDownloadVolumeSpec(*TaskSpec) ([]corev1.Volume, error) { return nil, nil } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_defaults.go new file mode 100644 index 000000000..ad01f6eb8 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_defaults.go @@ -0,0 +1,25 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "context" +) + +func (t *ClusterTask) SetDefaults(ctx context.Context) { + t.Spec.SetDefaults(ctx) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_types.go index 6eee3d769..fa3df09e3 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_types.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors. +Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,28 +17,10 @@ limitations under the License. package v1alpha1 import ( - "context" - - "github.com/knative/pkg/apis" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" ) -func (t *ClusterTask) TaskSpec() TaskSpec { - return t.Spec -} - -func (t *ClusterTask) TaskMetadata() metav1.ObjectMeta { - return t.ObjectMeta -} - -func (t *ClusterTask) Copy() TaskInterface { - return t.DeepCopy() -} - -func (t *ClusterTask) SetDefaults(ctx context.Context) { - t.Spec.SetDefaults(ctx) -} - // Check that Task may be validated and defaulted. 
var _ apis.Validatable = (*ClusterTask)(nil) var _ apis.Defaultable = (*ClusterTask)(nil) @@ -70,3 +52,15 @@ type ClusterTaskList struct { metav1.ListMeta `json:"metadata,omitempty"` Items []ClusterTask `json:"items"` } + +func (t *ClusterTask) TaskSpec() TaskSpec { + return t.Spec +} + +func (t *ClusterTask) TaskMetadata() metav1.ObjectMeta { + return t.ObjectMeta +} + +func (t *ClusterTask) Copy() TaskInterface { + return t.DeepCopy() +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_validation.go index bd64271c3..f6a2bd88d 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_validation.go @@ -19,7 +19,7 @@ package v1alpha1 import ( "context" - "github.com/knative/pkg/apis" + "knative.dev/pkg/apis" ) func (t *ClusterTask) Validate(ctx context.Context) *apis.FieldError { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/condition_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/condition_defaults.go new file mode 100644 index 000000000..7e30a4174 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/condition_defaults.go @@ -0,0 +1,26 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import "context" + +func (c *Condition) SetDefaults(ctx context.Context) { + c.Spec.SetDefaults(ctx) +} + +func (cs *ConditionSpec) SetDefaults(ctx context.Context) { +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/condition_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/condition_types.go new file mode 100644 index 000000000..7d7e45dae --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/condition_types.go @@ -0,0 +1,108 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" + duckv1beta1 "knative.dev/pkg/apis/duck/v1beta1" +) + +// Check that Task may be validated and defaulted. +var _ apis.Validatable = (*Condition)(nil) +var _ apis.Defaultable = (*Condition)(nil) + +// +genclient +// +genclient:noStatus +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Condition declares a step that is used to gate the execution of a Task in a Pipeline. 
+// A condition execution (ConditionCheck) evaluates to either true or false +// +k8s:openapi-gen=true +type Condition struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata"` + + // Spec holds the desired state of the Condition from the client + // +optional + Spec ConditionSpec `json:"spec"` +} + +// ConditionCheckStatus defines the observed state of ConditionCheck +type ConditionCheckStatus struct { + duckv1beta1.Status `json:",inline"` + + // PodName is the name of the pod responsible for executing this condition check. + PodName string `json:"podName"` + + // StartTime is the time the check is actually started. + // +optional + StartTime *metav1.Time `json:"startTime,omitempty"` + + // CompletionTime is the time the check pod completed. + // +optional + CompletionTime *metav1.Time `json:"completionTime,omitempty"` + + // Check describes the state of the check container. + // +optional + Check corev1.ContainerState `json:"check,omitempty"` +} + +// ConditionSpec defines the desired state of the Condition +type ConditionSpec struct { + // Check declares container whose exit code determines where a condition is true or false + Check corev1.Container `json:"check,omitempty"` + + // Params is an optional set of parameters which must be supplied by the user when a Condition + // is evaluated + // +optional + Params []ParamSpec `json:"params,omitempty"` +} + +// ConditionCheck represents a single evaluation of a Condition step. 
+type ConditionCheck TaskRun + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ConditionList contains a list of Conditions +type ConditionList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + Items []Condition `json:"items"` +} + +func NewConditionCheck(tr *TaskRun) *ConditionCheck { + if tr == nil { + return nil + } + + cc := ConditionCheck(*tr) + return &cc +} + +// IsDone returns true if the ConditionCheck's status indicates that it is done. +func (cc *ConditionCheck) IsDone() bool { + return !cc.Status.GetCondition(apis.ConditionSucceeded).IsUnknown() +} + +// IsSuccessful returns true if the ConditionCheck's status indicates that it is done. +func (cc *ConditionCheck) IsSuccessful() bool { + return cc.Status.GetCondition(apis.ConditionSucceeded).IsTrue() +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/condition_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/condition_validation.go new file mode 100644 index 000000000..acb965cf4 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/condition_validation.go @@ -0,0 +1,42 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/equality" + "knative.dev/pkg/apis" +) + +func (c Condition) Validate(ctx context.Context) *apis.FieldError { + if err := validateObjectMetadata(c.GetObjectMeta()); err != nil { + return err.ViaField("metadata") + } + return c.Spec.Validate(ctx).ViaField("Spec") +} + +func (cs *ConditionSpec) Validate(ctx context.Context) *apis.FieldError { + if equality.Semantic.DeepEqual(cs, ConditionSpec{}) { + return apis.ErrMissingField(apis.CurrentField) + } + + if cs.Check.Image == "" { + return apis.ErrMissingField("Check.Image") + } + return nil +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/contexts.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/contexts.go index 7edc042d5..1b7daa482 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/contexts.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/contexts.go @@ -28,3 +28,27 @@ type hdcnKey struct{} func WithDefaultConfigurationName(ctx context.Context) context.Context { return context.WithValue(ctx, hdcnKey{}, struct{}{}) } + +// HasDefaultConfigurationName checks to see whether the given context has +// been marked as having a default configurationName. +func HasDefaultConfigurationName(ctx context.Context) bool { + return ctx.Value(hdcnKey{}) != nil +} + +// lemonadeKey is used as the key for associating information +// with a context.Context. This variable doesn't really matter, so it's +// a total random name (for history purpose, used lemonade as it was written +// in an hot summer day). +type lemonadeKey struct{} + +// WithUpgradeViaDefaulting notes on the context that we want defaulting to rewrite +// from v1alpha1 pre-defaults to v1alpha1 post-defaults. 
+func WithUpgradeViaDefaulting(ctx context.Context) context.Context { + return context.WithValue(ctx, lemonadeKey{}, struct{}{}) +} + +// IsUpgradeViaDefaulting checks whether we should be "defaulting" from v1alpha1 pre-defaults to +// the v1alpha1 post-defaults subset. +func IsUpgradeViaDefaulting(ctx context.Context) bool { + return ctx.Value(lemonadeKey{}) != nil +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/doc.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/doc.go index 647707972..288d138dd 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/doc.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors. +Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/gcs_resource.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/gcs_resource.go index f963a0c3b..24064273a 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/gcs_resource.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/gcs_resource.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors. +Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -35,11 +35,10 @@ var ( // GCSResource is a GCS endpoint from which to get artifacts which is required // by a Build/Task for context (e.g. a archive from which to build an image). 
type GCSResource struct { - Name string `json:"name"` - Type PipelineResourceType `json:"type"` - Location string `json:"location"` - TypeDir bool `json:"typeDir"` - DestinationDir string `json:"destinationDir"` + Name string `json:"name"` + Type PipelineResourceType `json:"type"` + Location string `json:"location"` + TypeDir bool `json:"typeDir"` //Secret holds a struct to indicate a field name and corresponding secret name to populate it Secrets []SecretParam `json:"secrets"` } @@ -95,58 +94,60 @@ func (s *GCSResource) Replacements() map[string]string { "name": s.Name, "type": string(s.Type), "location": s.Location, - "path": s.DestinationDir, } } -// SetDestinationDirectory sets the destination directory at runtime like where is the resource going to be copied to -func (s *GCSResource) SetDestinationDirectory(destDir string) { s.DestinationDir = destDir } - -// GetUploadContainerSpec gets container spec for gcs resource to be uploaded like +// GetUploadSteps gets container spec for gcs resource to be uploaded like // set environment variable from secret params and set volume mounts for those secrets -func (s *GCSResource) GetUploadContainerSpec() ([]corev1.Container, error) { - if s.DestinationDir == "" { - return nil, xerrors.Errorf("GCSResource: Expect Destination Directory param to be set: %s", s.Name) - } +func (s *GCSResource) GetUploadSteps(sourcePath string) ([]Step, error) { var args []string if s.TypeDir { - args = []string{"-args", fmt.Sprintf("rsync -d -r %s %s", s.DestinationDir, s.Location)} + args = []string{"-args", fmt.Sprintf("rsync -d -r %s %s", sourcePath, s.Location)} } else { - args = []string{"-args", fmt.Sprintf("cp %s %s", filepath.Join(s.DestinationDir, "*"), s.Location)} + args = []string{"-args", fmt.Sprintf("cp %s %s", filepath.Join(sourcePath, "*"), s.Location)} } envVars, secretVolumeMount := getSecretEnvVarsAndVolumeMounts(s.Name, gcsSecretVolumeMountPath, s.Secrets) - return []corev1.Container{{ + return []Step{{Container: 
corev1.Container{ Name: names.SimpleNameGenerator.RestrictLengthWithRandomSuffix(fmt.Sprintf("upload-%s", s.Name)), Image: *gsutilImage, Command: []string{"/ko-app/gsutil"}, Args: args, VolumeMounts: secretVolumeMount, Env: envVars, - }}, nil + }}}, nil } -// GetDownloadContainerSpec returns an array of container specs to download gcs storage object -func (s *GCSResource) GetDownloadContainerSpec() ([]corev1.Container, error) { - if s.DestinationDir == "" { +// GetDownloadSteps returns an array of container specs to download gcs storage object +func (s *GCSResource) GetDownloadSteps(sourcePath string) ([]Step, error) { + if sourcePath == "" { return nil, xerrors.Errorf("GCSResource: Expect Destination Directory param to be set %s", s.Name) } var args []string if s.TypeDir { - args = []string{"-args", fmt.Sprintf("rsync -d -r %s %s", s.Location, s.DestinationDir)} + args = []string{"-args", fmt.Sprintf("rsync -d -r %s %s", s.Location, sourcePath)} } else { - args = []string{"-args", fmt.Sprintf("cp %s %s", s.Location, s.DestinationDir)} + args = []string{"-args", fmt.Sprintf("cp %s %s", s.Location, sourcePath)} } envVars, secretVolumeMount := getSecretEnvVarsAndVolumeMounts(s.Name, gcsSecretVolumeMountPath, s.Secrets) - return []corev1.Container{ - CreateDirContainer(s.Name, s.DestinationDir), { + return []Step{ + CreateDirStep(s.Name, sourcePath), + {Container: corev1.Container{ Name: names.SimpleNameGenerator.RestrictLengthWithRandomSuffix(fmt.Sprintf("fetch-%s", s.Name)), Image: *gsutilImage, Command: []string{"/ko-app/gsutil"}, Args: args, Env: envVars, VolumeMounts: secretVolumeMount, - }}, nil + }}}, nil +} + +func (s *GCSResource) GetUploadVolumeSpec(spec *TaskSpec) ([]corev1.Volume, error) { + return getStorageUploadVolumeSpec(s, spec) +} + +func (s *GCSResource) GetDownloadVolumeSpec(spec *TaskSpec) ([]corev1.Volume, error) { + return getStorageUploadVolumeSpec(s, spec) } diff --git 
a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/git_resource.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/git_resource.go index e924fdcbd..5885aa09c 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/git_resource.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/git_resource.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors. +Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -43,11 +43,10 @@ type GitResource struct { // Git revision (branch, tag, commit SHA or ref) to clone. See // https://git-scm.com/docs/gitrevisions#_specifying_revisions for more // information. - Revision string `json:"revision"` - TargetPath string + Revision string `json:"revision"` } -// NewGitResource create a new git resource to pass to a Task +// NewGitResource creates a new git resource to pass to a Task func NewGitResource(r *PipelineResource) (*GitResource, error) { if r.Spec.Type != PipelineResourceTypeGit { return nil, xerrors.Errorf("GitResource: Cannot create a Git resource from a %s Pipeline Resource", r.Spec.Type) @@ -64,7 +63,7 @@ func NewGitResource(r *PipelineResource) (*GitResource, error) { gitResource.Revision = param.Value } } - // default revision to master is nothing is provided + // default revision to master if nothing is provided if gitResource.Revision == "" { gitResource.Revision = "master" } @@ -93,30 +92,25 @@ func (s *GitResource) Replacements() map[string]string { "type": string(s.Type), "url": s.URL, "revision": s.Revision, - "path": s.TargetPath, } } -func (s *GitResource) GetDownloadContainerSpec() ([]corev1.Container, error) { +func (s *GitResource) GetDownloadSteps(sourcePath string) ([]Step, error) { args := []string{"-url", s.URL, "-revision", s.Revision, } - args = append(args, []string{"-path", s.TargetPath}...) 
+ args = append(args, []string{"-path", sourcePath}...) - return []corev1.Container{{ + return []Step{{Container: corev1.Container{ Name: names.SimpleNameGenerator.RestrictLengthWithRandomSuffix(gitSource + "-" + s.Name), Image: *gitImage, Command: []string{"/ko-app/git-init"}, Args: args, WorkingDir: WorkspaceDir, - }}, nil + }}}, nil } -func (s *GitResource) SetDestinationDirectory(path string) { - s.TargetPath = path -} - -func (s *GitResource) GetUploadContainerSpec() ([]corev1.Container, error) { - return nil, nil -} +func (s *GitResource) GetUploadSteps(sourcePath string) ([]Step, error) { return nil, nil } +func (s *GitResource) GetUploadVolumeSpec(spec *TaskSpec) ([]corev1.Volume, error) { return nil, nil } +func (s *GitResource) GetDownloadVolumeSpec(spec *TaskSpec) ([]corev1.Volume, error) { return nil, nil } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/image_resource.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/image_resource.go index 4d8ff4f38..f5066ab6b 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/image_resource.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/image_resource.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors. +Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -75,19 +75,10 @@ func (s *ImageResource) Replacements() map[string]string { } } -// GetUploadContainerSpec returns the spec for the upload container -func (s *ImageResource) GetUploadContainerSpec() ([]corev1.Container, error) { - return nil, nil -} - -// GetDownloadContainerSpec returns the spec for the download container -func (s *ImageResource) GetDownloadContainerSpec() ([]corev1.Container, error) { - return nil, nil -} - -// SetDestinationDirectory sets the destination for the resource -func (s *ImageResource) SetDestinationDirectory(path string) { -} +func (s *ImageResource) GetUploadSteps(string) ([]Step, error) { return nil, nil } +func (s *ImageResource) GetDownloadSteps(string) ([]Step, error) { return nil, nil } +func (s *ImageResource) GetUploadVolumeSpec(*TaskSpec) ([]corev1.Volume, error) { return nil, nil } +func (s *ImageResource) GetDownloadVolumeSpec(*TaskSpec) ([]corev1.Volume, error) { return nil, nil } // GetOutputImageDir return the path to get the index.json file func (s *ImageResource) GetOutputImageDir() string { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/merge/merge.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/merge.go similarity index 57% rename from vendor/github.com/tektoncd/pipeline/pkg/merge/merge.go rename to vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/merge.go index 04ece1f22..8566d84aa 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/merge/merge.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/merge.go @@ -1,17 +1,20 @@ /* - Copyright 2019 Knative Authors LLC - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ -package merge +package v1alpha1 import ( "encoding/json" @@ -20,28 +23,30 @@ import ( "k8s.io/apimachinery/pkg/util/strategicpatch" ) -// CombineStepsWithStepTemplate takes a possibly nil container template and a list of step containers, merging each -// of the step containers onto the container template, if it's not nil, and returning the resulting list. -func CombineStepsWithStepTemplate(template *v1.Container, steps []v1.Container) ([]v1.Container, error) { +// MergeStepsWithStepTemplate takes a possibly nil container template and a +// list of steps, merging each of the steps with the container template, if +// it's not nil, and returning the resulting list. +func MergeStepsWithStepTemplate(template *v1.Container, steps []Step) ([]Step, error) { if template == nil { return steps, nil } - // We need JSON bytes to generate a patch to merge the step containers onto the template container, so marshal the template. 
+ // We need JSON bytes to generate a patch to merge the step containers + // onto the template container, so marshal the template. templateAsJSON, err := json.Marshal(template) if err != nil { return nil, err } - // We need to do a three-way merge to actually combine the template and step containers, so we need an empty container - // as the "original" + // We need to do a three-way merge to actually merge the template and + // step containers, so we need an empty container as the "original" emptyAsJSON, err := json.Marshal(&v1.Container{}) if err != nil { return nil, err } for i, s := range steps { - // Marshal the step to JSON - stepAsJSON, err := json.Marshal(s) + // Marshal the step's to JSON + stepAsJSON, err := json.Marshal(s.Container) if err != nil { return nil, err } @@ -79,7 +84,7 @@ func CombineStepsWithStepTemplate(template *v1.Container, steps []v1.Container) merged.Args = []string{} } - steps[i] = *merged + steps[i] = Step{Container: *merged} } return steps, nil } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/metadata_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/metadata_validation.go index 3ac3ea234..9a51a546e 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/metadata_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/metadata_validation.go @@ -1,9 +1,12 @@ /* Copyright 2019 The Tekton Authors + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
@@ -16,8 +19,8 @@ package v1alpha1 import ( "strings" - "github.com/knative/pkg/apis" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" ) const maxLength = 63 diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/param_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/param_types.go index 0c30fffd5..41574b98b 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/param_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/param_types.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors. +Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,12 +16,22 @@ limitations under the License. package v1alpha1 +import ( + "context" + "encoding/json" + "fmt" +) + // ParamSpec defines arbitrary parameters needed beyond typed inputs (such as // resources). Parameter values are provided by users as inputs on a TaskRun // or PipelineRun. type ParamSpec struct { // Name declares the name by which a parameter is referenced. Name string `json:"name"` + // Type is the user-specified type of the parameter. The possible types + // are currently "string" and "array", and "string" is the default. + // +optional + Type ParamType `json:"type,omitempty"` // Description is a user-facing description of the parameter that may be // used to populate a UI. // +optional @@ -30,11 +40,88 @@ type ParamSpec struct { // default is set, a Task may be executed without a supplied value for the // parameter. // +optional - Default string `json:"default,omitempty"` + Default *ArrayOrString `json:"default,omitempty"` } -// Param declares a value to use for the Param called Name. 
-type Param struct { +func (pp *ParamSpec) SetDefaults(ctx context.Context) { + if pp != nil && pp.Type == "" { + if pp.Default != nil { + // propagate the parsed ArrayOrString's type to the parent ParamSpec's type + pp.Type = pp.Default.Type + } else { + // ParamTypeString is the default value (when no type can be inferred from the default value) + pp.Type = ParamTypeString + } + } +} + +// ResourceParam declares a string value to use for the parameter called Name, and is used in +// the specific context of PipelineResources. +type ResourceParam struct { Name string `json:"name"` Value string `json:"value"` } + +// Param declares an ArrayOrString to use for the parameter called name. +type Param struct { + Name string `json:"name"` + Value ArrayOrString `json:"value"` +} + +// ParamType indicates the type of an input parameter; +// Used to distinguish between a single string and an array of strings. +type ParamType string + +// Valid ParamTypes: +const ( + ParamTypeString ParamType = "string" + ParamTypeArray ParamType = "array" +) + +// AllParamTypes can be used for ParamType validation. +var AllParamTypes = []ParamType{ParamTypeString, ParamTypeArray} + +// ArrayOrString is modeled after IntOrString in kubernetes/apimachinery: + +// ArrayOrString is a type that can hold a single string or string array. +// Used in JSON unmarshalling so that a single JSON field can accept +// either an individual string or an array of strings. +type ArrayOrString struct { + Type ParamType // Represents the stored type of ArrayOrString. + StringVal string + ArrayVal []string +} + +// UnmarshalJSON implements the json.Unmarshaller interface. 
+func (arrayOrString *ArrayOrString) UnmarshalJSON(value []byte) error { + if value[0] == '"' { + arrayOrString.Type = ParamTypeString + return json.Unmarshal(value, &arrayOrString.StringVal) + } + arrayOrString.Type = ParamTypeArray + return json.Unmarshal(value, &arrayOrString.ArrayVal) +} + +// MarshalJSON implements the json.Marshaller interface. +func (arrayOrString ArrayOrString) MarshalJSON() ([]byte, error) { + switch arrayOrString.Type { + case ParamTypeString: + return json.Marshal(arrayOrString.StringVal) + case ParamTypeArray: + return json.Marshal(arrayOrString.ArrayVal) + default: + return []byte{}, fmt.Errorf("impossible ArrayOrString.Type: %q", arrayOrString.Type) + } +} + +func (arrayOrString *ArrayOrString) ApplyReplacements(stringReplacements map[string]string, arrayReplacements map[string][]string) { + if arrayOrString.Type == ParamTypeString { + arrayOrString.StringVal = ApplyReplacements(arrayOrString.StringVal, stringReplacements) + } else { + var newArrayVal []string + for _, v := range arrayOrString.ArrayVal { + newArrayVal = append(newArrayVal, ApplyArrayReplacements(v, stringReplacements, arrayReplacements)...) + } + arrayOrString.ArrayVal = newArrayVal + } +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_defaults.go index 1eb539b91..f52d8c195 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_defaults.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_defaults.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors. +Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -28,4 +28,7 @@ func (ps *PipelineSpec) SetDefaults(ctx context.Context) { pt.TaskRef.Kind = NamespacedTaskKind } } + for i := range ps.Params { + ps.Params[i].SetDefaults(ctx) + } } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_types.go index 59d1f83cd..c8968f610 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_types.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors. +Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,8 +17,8 @@ limitations under the License. package v1alpha1 import ( - "github.com/knative/pkg/apis" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" ) // PipelineSpec defines the desired state of Pipeline. @@ -83,6 +83,10 @@ type PipelineTask struct { // TaskRef is a reference to a task definition. TaskRef TaskRef `json:"taskRef"` + // Conditions is a list of conditions that need to be true for the task to run + // +optional + Conditions []PipelineTaskCondition `json:"conditions,omitempty"` + // Retries represents how many times this task should be retried in case of task failure: ConditionSucceeded set to False // +optional Retries int `json:"retries,omitempty"` @@ -107,6 +111,17 @@ type PipelineTaskParam struct { Value string `json:"value"` } +// PipelineTaskCondition allows a PipelineTask to declare a Condition to be evaluated before +// the Task is run. 
+type PipelineTaskCondition struct { + // ConditionRef is the name of the Condition to use for the conditionCheck + ConditionRef string `json:"conditionRef"` + + // Params declare parameters passed to this Condition + // +optional + Params []Param `json:"params,omitempty"` +} + // PipelineDeclaredResource is used by a Pipeline to declare the types of the // PipelineResources that it will required to run and names which can be used to // refer to these PipelineResources in PipelineTaskResourceBindings. diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_validation.go index ce6f6171d..a7d2b3ad4 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_validation.go @@ -20,11 +20,10 @@ import ( "context" "fmt" - "github.com/knative/pkg/apis" "github.com/tektoncd/pipeline/pkg/list" - "github.com/tektoncd/pipeline/pkg/templating" "golang.org/x/xerrors" "k8s.io/apimachinery/pkg/api/equality" + "knative.dev/pkg/apis" ) // Validate checks that the Pipeline structure is valid but does not validate @@ -33,7 +32,7 @@ func (p *Pipeline) Validate(ctx context.Context) *apis.FieldError { if err := validateObjectMetadata(p.GetObjectMeta()); err != nil { return err.ViaField("metadata") } - return nil + return p.Spec.Validate(ctx) } func validateDeclaredResources(ps *PipelineSpec) error { @@ -151,17 +150,61 @@ func (ps *PipelineSpec) Validate(ctx context.Context) *apis.FieldError { func validatePipelineParameterVariables(tasks []PipelineTask, params []ParamSpec) *apis.FieldError { parameterNames := map[string]struct{}{} + arrayParameterNames := map[string]struct{}{} + for _, p := range params { + // Verify that p is a valid type. 
+ validType := false + for _, allowedType := range AllParamTypes { + if p.Type == allowedType { + validType = true + } + } + if !validType { + return apis.ErrInvalidValue(string(p.Type), fmt.Sprintf("spec.params.%s.type", p.Name)) + } + + // If a default value is provided, ensure its type matches param's declared type. + if (p.Default != nil) && (p.Default.Type != p.Type) { + return &apis.FieldError{ + Message: fmt.Sprintf( + "\"%v\" type does not match default value's type: \"%v\"", p.Type, p.Default.Type), + Paths: []string{ + fmt.Sprintf("spec.params.%s.type", p.Name), + fmt.Sprintf("spec.params.%s.default.type", p.Name), + }, + } + } + + // Add parameter name to parameterNames, and to arrayParameterNames if type is array. parameterNames[p.Name] = struct{}{} + if p.Type == ParamTypeArray { + arrayParameterNames[p.Name] = struct{}{} + } } - return validatePipelineVariables(tasks, "params", parameterNames) + + return validatePipelineVariables(tasks, "params", parameterNames, arrayParameterNames) } -func validatePipelineVariables(tasks []PipelineTask, prefix string, vars map[string]struct{}) *apis.FieldError { +func validatePipelineVariables(tasks []PipelineTask, prefix string, paramNames map[string]struct{}, arrayParamNames map[string]struct{}) *apis.FieldError { for _, task := range tasks { for _, param := range task.Params { - if err := validatePipelineVariable(fmt.Sprintf("param[%s]", param.Name), param.Value, prefix, vars); err != nil { - return err + if param.Value.Type == ParamTypeString { + if err := validatePipelineVariable(fmt.Sprintf("param[%s]", param.Name), param.Value.StringVal, prefix, paramNames); err != nil { + return err + } + if err := validatePipelineNoArrayReferenced(fmt.Sprintf("param[%s]", param.Name), param.Value.StringVal, prefix, arrayParamNames); err != nil { + return err + } + } else { + for _, arrayElement := range param.Value.ArrayVal { + if err := validatePipelineVariable(fmt.Sprintf("param[%s]", param.Name), arrayElement, prefix, 
paramNames); err != nil { + return err + } + if err := validatePipelineArraysIsolated(fmt.Sprintf("param[%s]", param.Name), arrayElement, prefix, arrayParamNames); err != nil { + return err + } + } } } } @@ -169,5 +212,13 @@ func validatePipelineVariables(tasks []PipelineTask, prefix string, vars map[str } func validatePipelineVariable(name, value, prefix string, vars map[string]struct{}) *apis.FieldError { - return templating.ValidateVariable(name, value, prefix, "", "task parameter", "pipelinespec.params", vars) + return ValidateVariable(name, value, prefix, "", "task parameter", "pipelinespec.params", vars) +} + +func validatePipelineNoArrayReferenced(name, value, prefix string, vars map[string]struct{}) *apis.FieldError { + return ValidateVariableProhibited(name, value, prefix, "", "task parameter", "pipelinespec.params", vars) +} + +func validatePipelineArraysIsolated(name, value, prefix string, vars map[string]struct{}) *apis.FieldError { + return ValidateVariableIsolated(name, value, prefix, "", "task parameter", "pipelinespec.params", vars) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelineresource_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelineresource_validation.go index 147f28c2e..ead5b72cc 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelineresource_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelineresource_validation.go @@ -20,8 +20,8 @@ import ( "context" "strings" - "github.com/knative/pkg/apis" "k8s.io/apimachinery/pkg/api/equality" + "knative.dev/pkg/apis" ) func (r *PipelineResource) Validate(ctx context.Context) *apis.FieldError { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_defaults.go index 5f18fb20c..b24760aeb 100644 --- 
a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_defaults.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_defaults.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors. +Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -31,6 +31,15 @@ func (pr *PipelineRun) SetDefaults(ctx context.Context) { func (prs *PipelineRunSpec) SetDefaults(ctx context.Context) { cfg := config.FromContextOrDefaults(ctx) if prs.Timeout == nil { - prs.Timeout = &metav1.Duration{Duration: time.Duration(cfg.Defaults.DefaultTimeoutMinutes) * time.Minute} + var timeout *metav1.Duration + if IsUpgradeViaDefaulting(ctx) { + // This case is for preexisting `TaskRun` before 0.5.0, so let's + // add the old default timeout. + // Most likely those TaskRun passing here are already done and/or already running + timeout = &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute} + } else { + timeout = &metav1.Duration{Duration: time.Duration(cfg.Defaults.DefaultTimeoutMinutes) * time.Minute} + } + prs.Timeout = timeout } } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_types.go index d4b937265..2e918423e 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_types.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors. +Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -20,11 +20,11 @@ import ( "fmt" "time" - "github.com/knative/pkg/apis" - duckv1beta1 "github.com/knative/pkg/apis/duck/v1beta1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" + "knative.dev/pkg/apis" + duckv1beta1 "knative.dev/pkg/apis/duck/v1beta1" ) var ( @@ -62,6 +62,11 @@ type PipelineRunSpec struct { // Refer to Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration // +optional Timeout *metav1.Duration `json:"timeout,omitempty"` + + // PodTemplate holds pod specific configuration + PodTemplate PodTemplate `json:"podTemplate,omitempty"` + + // FIXME(vdemeester) Deprecated // NodeSelector is a selector which must be true for the pod to fit on a node. // Selector which must match a node's labels for the pod to be scheduled on that node. // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ @@ -131,6 +136,17 @@ type PipelineRunTaskRunStatus struct { // Status is the TaskRunStatus for the corresponding TaskRun // +optional Status *TaskRunStatus `json:"status,omitempty"` + // ConditionChecks maps the name of a condition check to its Status + // +optional + ConditionChecks map[string]*PipelineRunConditionCheckStatus `json:"conditionChecks,omitempty"` +} + +type PipelineRunConditionCheckStatus struct { + // ConditionName is the name of the Condition + ConditionName string `json:"conditionName,omitempty"` + // Status is the ConditionCheckStatus for the corresponding ConditionCheck + // +optional + Status *ConditionCheckStatus `json:"status,omitempty"` } var pipelineRunCondSet = apis.NewBatchConditionSet() @@ -240,3 +256,18 @@ func (pr *PipelineRun) IsCancelled() bool { func (pr *PipelineRun) GetRunKey() string { return fmt.Sprintf("%s/%s/%s", pipelineRunControllerName, pr.Namespace, pr.Name) } + +// IsTimedOut returns true if a pipelinerun has exceeded its spec.Timeout based on its status.Timeout +func (pr *PipelineRun) 
IsTimedOut() bool { + pipelineTimeout := pr.Spec.Timeout + startTime := pr.Status.StartTime + + if !startTime.IsZero() && pipelineTimeout != nil { + timeout := pipelineTimeout.Duration + runtime := time.Since(startTime.Time) + if runtime > timeout { + return true + } + } + return false +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_validation.go index fba755b9c..5305f20a2 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_validation.go @@ -20,8 +20,8 @@ import ( "context" "fmt" - "github.com/knative/pkg/apis" "k8s.io/apimachinery/pkg/api/equality" + "knative.dev/pkg/apis" ) // Validate pipelinerun @@ -51,8 +51,8 @@ func (ps *PipelineRunSpec) Validate(ctx context.Context) *apis.FieldError { if ps.Timeout != nil { // timeout should be a valid duration of at least 0. - if ps.Timeout.Duration <= 0 { - return apis.ErrInvalidValue(fmt.Sprintf("%s should be > 0", ps.Timeout.Duration.String()), "spec.timeout") + if ps.Timeout.Duration < 0 { + return apis.ErrInvalidValue(fmt.Sprintf("%s should be >= 0", ps.Timeout.Duration.String()), "spec.timeout") } } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pod.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pod.go new file mode 100644 index 000000000..f348a788e --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pod.go @@ -0,0 +1,62 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// PodTemplate holds pod specific configuration +type PodTemplate struct { + // NodeSelector is a selector which must be true for the pod to fit on a node. + // Selector which must match a node's labels for the pod to be scheduled on that node. + // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + + // If specified, the pod's tolerations. + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + + // If specified, the pod's scheduling constraints + // +optional + Affinity *corev1.Affinity `json:"affinity,omitempty"` + + // SecurityContext holds pod-level security attributes and common container settings. + // Optional: Defaults to empty. See type description for default values of each field. + // +optional + SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"` + + // List of volumes that can be mounted by containers belonging to the pod. + // More info: https://kubernetes.io/docs/concepts/storage/volumes + // +optional + Volumes []corev1.Volume `json:"volumes,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"` +} + +// CombinePodTemplate takes a PodTemplate (either from TaskRun or PipelineRun) and merge it with deprecated field that were inlined. 
+func CombinedPodTemplate(template PodTemplate, deprecatedNodeSelector map[string]string, deprecatedTolerations []corev1.Toleration, deprecatedAffinity *corev1.Affinity) PodTemplate { + if len(template.NodeSelector) == 0 && len(deprecatedNodeSelector) != 0 { + template.NodeSelector = deprecatedNodeSelector + } + if len(template.Tolerations) == 0 && len(deprecatedTolerations) != 0 { + template.Tolerations = deprecatedTolerations + } + if template.Affinity == nil && deprecatedAffinity != nil { + template.Affinity = deprecatedAffinity + } + return template +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pull_request_resource.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pull_request_resource.go index f10c73a99..b202fd19d 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pull_request_resource.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pull_request_resource.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors. +Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -42,7 +42,6 @@ type PullRequestResource struct { Name string `json:"name"` Type PipelineResourceType `json:"type"` - DestinationDir string `json:"destinationDir"` // GitHub URL pointing to the pull request. 
// Example: https://github.com/owner/repo/pulls/1 URL string `json:"url"` @@ -94,16 +93,15 @@ func (s *PullRequestResource) Replacements() map[string]string { } } -func (s *PullRequestResource) GetDownloadContainerSpec() ([]corev1.Container, error) { - return s.getContainerSpec("download") +func (s *PullRequestResource) GetDownloadSteps(sourcePath string) ([]Step, error) { + return s.getSteps("download", sourcePath) } - -func (s *PullRequestResource) GetUploadContainerSpec() ([]corev1.Container, error) { - return s.getContainerSpec("upload") +func (s *PullRequestResource) GetUploadSteps(sourcePath string) ([]Step, error) { + return s.getSteps("upload", sourcePath) } -func (s *PullRequestResource) getContainerSpec(mode string) ([]corev1.Container, error) { - args := []string{"-url", s.URL, "-path", s.DestinationDir, "-mode", mode} +func (s *PullRequestResource) getSteps(mode string, sourcePath string) ([]Step, error) { + args := []string{"-url", s.URL, "-path", sourcePath, "-mode", mode} evs := []corev1.EnvVar{} for _, sec := range s.Secrets { @@ -124,17 +122,20 @@ func (s *PullRequestResource) getContainerSpec(mode string) ([]corev1.Container, } } - return []corev1.Container{{ + return []Step{{Container: corev1.Container{ Name: names.SimpleNameGenerator.RestrictLengthWithRandomSuffix(prSource + "-" + s.Name), Image: *prImage, Command: []string{"/ko-app/pullrequest-init"}, Args: args, WorkingDir: WorkspaceDir, Env: evs, - }}, nil + }}}, nil +} + +func (s *PullRequestResource) GetUploadVolumeSpec(spec *TaskSpec) ([]corev1.Volume, error) { + return nil, nil } -// SetDestinationDirectory sets the destination directory at runtime like where is the resource going to be copied to -func (s *PullRequestResource) SetDestinationDirectory(dir string) { - s.DestinationDir = dir +func (s *PullRequestResource) GetDownloadVolumeSpec(spec *TaskSpec) ([]corev1.Volume, error) { + return nil, nil } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/register.go 
b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/register.go index 6f06cdfb5..a8d527214 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/register.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/register.go @@ -48,6 +48,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Task{}, &TaskList{}, + &Condition{}, + &ConditionList{}, &ClusterTask{}, &ClusterTaskList{}, &TaskRun{}, diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/resource_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/resource_defaults.go index 01b0f4c44..057d35375 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/resource_defaults.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/resource_defaults.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors. +Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/resource_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/resource_types.go index 15620a36d..8cab9f523 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/resource_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/resource_types.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors. +Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,10 +17,10 @@ limitations under the License. 
package v1alpha1 import ( - "github.com/knative/pkg/apis" "golang.org/x/xerrors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" ) // PipelineResourceType represents the type of endpoint the pipelineResource is, so that the @@ -43,19 +43,23 @@ const ( // PipelineResourceTypePullRequest indicates that this source is a SCM Pull Request. PipelineResourceTypePullRequest PipelineResourceType = "pullRequest" + + // PipelineResourceTypeCloudEvent indicates that this source is a cloud event URI + PipelineResourceTypeCloudEvent PipelineResourceType = "cloudEvent" ) // AllResourceTypes can be used for validation to check if a provided Resource type is one of the known types. -var AllResourceTypes = []PipelineResourceType{PipelineResourceTypeGit, PipelineResourceTypeStorage, PipelineResourceTypeImage, PipelineResourceTypeCluster, PipelineResourceTypePullRequest} +var AllResourceTypes = []PipelineResourceType{PipelineResourceTypeGit, PipelineResourceTypeStorage, PipelineResourceTypeImage, PipelineResourceTypeCluster, PipelineResourceTypePullRequest, PipelineResourceTypeCloudEvent} // PipelineResourceInterface interface to be implemented by different PipelineResource types type PipelineResourceInterface interface { GetName() string GetType() PipelineResourceType Replacements() map[string]string - GetDownloadContainerSpec() ([]corev1.Container, error) - GetUploadContainerSpec() ([]corev1.Container, error) - SetDestinationDirectory(string) + GetDownloadSteps(sourcePath string) ([]Step, error) + GetUploadSteps(sourcePath string) ([]Step, error) + GetUploadVolumeSpec(spec *TaskSpec) ([]corev1.Volume, error) + GetDownloadVolumeSpec(spec *TaskSpec) ([]corev1.Volume, error) } // SecretParam indicates which secret can be used to populate a field of the resource @@ -68,7 +72,7 @@ type SecretParam struct { // PipelineResourceSpec defines an individual resources used in the pipeline. 
type PipelineResourceSpec struct { Type PipelineResourceType `json:"type"` - Params []Param `json:"params"` + Params []ResourceParam `json:"params"` // Secrets to fetch to populate some of resource fields // +optional SecretParams []SecretParam `json:"secrets,omitempty"` @@ -143,6 +147,8 @@ func ResourceFromType(r *PipelineResource) (PipelineResourceInterface, error) { return NewStorageResource(r) case PipelineResourceTypePullRequest: return NewPullRequestResource(r) + case PipelineResourceTypeCloudEvent: + return NewCloudEventResource(r) } return nil, xerrors.Errorf("%s is an invalid or unimplemented PipelineResource", r.Spec.Type) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/result_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/result_types.go index d15415deb..26cde1765 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/result_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/result_types.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors. +Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,7 +21,7 @@ import ( "fmt" "net/url" - "github.com/knative/pkg/apis" + "knative.dev/pkg/apis" ) // AllResultTargetTypes is a list of all ResultTargetTypes, used for validation diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/secret_volume_mount.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/secret_volume_mount.go index aa9f53348..c973dfdb9 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/secret_volume_mount.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/secret_volume_mount.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors. 
+Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/step_replacements.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/step_replacements.go new file mode 100644 index 000000000..c0dc48225 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/step_replacements.go @@ -0,0 +1,67 @@ +/* + Copyright 2019 The Tekton Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package v1alpha1 + +func ApplyStepReplacements(step *Step, stringReplacements map[string]string, arrayReplacements map[string][]string) { + step.Name = ApplyReplacements(step.Name, stringReplacements) + step.Image = ApplyReplacements(step.Image, stringReplacements) + + //Use ApplyArrayReplacements here, as additional args may be added via an array parameter. + var newArgs []string + for _, a := range step.Args { + newArgs = append(newArgs, ApplyArrayReplacements(a, stringReplacements, arrayReplacements)...) 
+ } + step.Args = newArgs + + for ie, e := range step.Env { + step.Env[ie].Value = ApplyReplacements(e.Value, stringReplacements) + if step.Env[ie].ValueFrom != nil { + if e.ValueFrom.SecretKeyRef != nil { + step.Env[ie].ValueFrom.SecretKeyRef.LocalObjectReference.Name = ApplyReplacements(e.ValueFrom.SecretKeyRef.LocalObjectReference.Name, stringReplacements) + step.Env[ie].ValueFrom.SecretKeyRef.Key = ApplyReplacements(e.ValueFrom.SecretKeyRef.Key, stringReplacements) + } + if e.ValueFrom.ConfigMapKeyRef != nil { + step.Env[ie].ValueFrom.ConfigMapKeyRef.LocalObjectReference.Name = ApplyReplacements(e.ValueFrom.ConfigMapKeyRef.LocalObjectReference.Name, stringReplacements) + step.Env[ie].ValueFrom.ConfigMapKeyRef.Key = ApplyReplacements(e.ValueFrom.ConfigMapKeyRef.Key, stringReplacements) + } + } + } + + for ie, e := range step.EnvFrom { + step.EnvFrom[ie].Prefix = ApplyReplacements(e.Prefix, stringReplacements) + if e.ConfigMapRef != nil { + step.EnvFrom[ie].ConfigMapRef.LocalObjectReference.Name = ApplyReplacements(e.ConfigMapRef.LocalObjectReference.Name, stringReplacements) + } + if e.SecretRef != nil { + step.EnvFrom[ie].SecretRef.LocalObjectReference.Name = ApplyReplacements(e.SecretRef.LocalObjectReference.Name, stringReplacements) + } + } + step.WorkingDir = ApplyReplacements(step.WorkingDir, stringReplacements) + + //Use ApplyArrayReplacements here, as additional commands may be added via an array parameter. + var newCommand []string + for _, c := range step.Command { + newCommand = append(newCommand, ApplyArrayReplacements(c, stringReplacements, arrayReplacements)...) 
+ } + step.Command = newCommand + + for iv, v := range step.VolumeMounts { + step.VolumeMounts[iv].Name = ApplyReplacements(v.Name, stringReplacements) + step.VolumeMounts[iv].MountPath = ApplyReplacements(v.MountPath, stringReplacements) + step.VolumeMounts[iv].SubPath = ApplyReplacements(v.SubPath, stringReplacements) + } +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/storage_resource.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/storage_resource.go index d1470e3e6..fbe271fbf 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/storage_resource.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/storage_resource.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors. +Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,9 +17,11 @@ limitations under the License. package v1alpha1 import ( + "fmt" "strings" "golang.org/x/xerrors" + corev1 "k8s.io/api/core/v1" ) type PipelineResourceStorageType string @@ -55,3 +57,32 @@ func NewStorageResource(r *PipelineResource) (PipelineStorageResourceInterface, } return nil, xerrors.Errorf("StoreResource: Cannot create a storage resource without type %s in spec", r.Name) } + +func getStorageUploadVolumeSpec(s PipelineStorageResourceInterface, spec *TaskSpec) ([]corev1.Volume, error) { + var storageVol []corev1.Volume + mountedSecrets := map[string]string{} + + for _, volume := range spec.Volumes { + mountedSecrets[volume.Name] = "" + } + + // Map holds list of secrets that are mounted as volumes + for _, secretParam := range s.GetSecretParams() { + volName := fmt.Sprintf("volume-%s-%s", s.GetName(), secretParam.SecretName) + + gcsSecretVolume := corev1.Volume{ + Name: volName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: secretParam.SecretName, + }, + }, + } + + if _, ok := 
mountedSecrets[volName]; !ok { + storageVol = append(storageVol, gcsSecretVolume) + mountedSecrets[volName] = "" + } + } + return storageVol, nil +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/substitution.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/substitution.go new file mode 100644 index 000000000..c79c224c6 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/substitution.go @@ -0,0 +1,156 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "fmt" + "regexp" + "strings" + + "knative.dev/pkg/apis" +) + +const parameterSubstitution = "[_a-zA-Z][_a-zA-Z0-9.-]*" + +//TODO(#1170): Regex matches both ${} and $(), we will need to remove ${} compatibility. +const braceMatchingRegex = "(\\$({%s.(?P%s)}))|(\\$(\\(%s.(?P%s)\\)))" + +func ValidateVariable(name, value, prefix, contextPrefix, locationName, path string, vars map[string]struct{}) *apis.FieldError { + if vs, present := extractVariablesFromString(value, contextPrefix+prefix); present { + for _, v := range vs { + if _, ok := vars[v]; !ok { + return &apis.FieldError{ + Message: fmt.Sprintf("non-existent variable in %q for %s %s", value, locationName, name), + Paths: []string{path + "." + name}, + } + } + } + } + return nil +} + +// Verifies that variables matching the relevant string expressions do not reference any of the names present in vars. 
+func ValidateVariableProhibited(name, value, prefix, contextPrefix, locationName, path string, vars map[string]struct{}) *apis.FieldError { + if vs, present := extractVariablesFromString(value, contextPrefix+prefix); present { + for _, v := range vs { + if _, ok := vars[v]; ok { + return &apis.FieldError{ + Message: fmt.Sprintf("variable type invalid in %q for %s %s", value, locationName, name), + Paths: []string{path + "." + name}, + } + } + } + } + return nil +} + +// Verifies that variables matching the relevant string expressions are completely isolated if present. +func ValidateVariableIsolated(name, value, prefix, contextPrefix, locationName, path string, vars map[string]struct{}) *apis.FieldError { + if vs, present := extractVariablesFromString(value, contextPrefix+prefix); present { + firstMatch, _ := extractExpressionFromString(value, contextPrefix+prefix) + for _, v := range vs { + if _, ok := vars[v]; ok { + if len(value) != len(firstMatch) { + return &apis.FieldError{ + Message: fmt.Sprintf("variable is not properly isolated in %q for %s %s", value, locationName, name), + Paths: []string{path + "." + name}, + } + } + } + } + } + return nil +} + +// Extract a the first full string expressions found (e.g "${input.params.foo}"). Return +// "" and false if nothing is found. 
+func extractExpressionFromString(s, prefix string) (string, bool) { + pattern := fmt.Sprintf(braceMatchingRegex, prefix, parameterSubstitution, prefix, parameterSubstitution) + re := regexp.MustCompile(pattern) + match := re.FindStringSubmatch(s) + if match == nil { + return "", false + } + return match[0], true +} + +func extractVariablesFromString(s, prefix string) ([]string, bool) { + pattern := fmt.Sprintf(braceMatchingRegex, prefix, parameterSubstitution, prefix, parameterSubstitution) + re := regexp.MustCompile(pattern) + matches := re.FindAllStringSubmatch(s, -1) + if len(matches) == 0 { + return []string{}, false + } + vars := make([]string, len(matches)) + for i, match := range matches { + groups := matchGroups(match, re) + // foo -> foo + // foo.bar -> foo + // foo.bar.baz -> foo + var v string + if groups["var"] != "" { + v = groups["var"] + } else { + //TODO(#1170): Regex matches both ${} and $(), we will need to remove ${} compatibility. + v = groups["deprecated"] + } + vars[i] = strings.SplitN(v, ".", 2)[0] + } + return vars, true +} + +func matchGroups(matches []string, pattern *regexp.Regexp) map[string]string { + groups := make(map[string]string) + for i, name := range pattern.SubexpNames()[1:] { + groups[name] = matches[i+1] + } + return groups +} + +func ApplyReplacements(in string, replacements map[string]string) string { + for k, v := range replacements { + in = strings.Replace(in, fmt.Sprintf("$(%s)", k), v, -1) + + //TODO(#1170): Delete the line below when we want to remove support for ${} variable interpolation: + in = strings.Replace(in, fmt.Sprintf("${%s}", k), v, -1) + } + return in +} + +// Take an input string, and output an array of strings related to possible arrayReplacements. If there aren't any +// areas where the input can be split up via arrayReplacements, then just return an array with a single element, +// which is ApplyReplacements(in, replacements). 
+func ApplyArrayReplacements(in string, stringReplacements map[string]string, arrayReplacements map[string][]string) []string { + for k, v := range arrayReplacements { + stringToReplace := fmt.Sprintf("$(%s)", k) + + // If the input string matches a replacement's key (without padding characters), return the corresponding array. + // Note that the webhook should prevent all instances where this could evaluate to false. + if (strings.Count(in, stringToReplace) == 1) && len(in) == len(stringToReplace) { + return v + } + + //TODO(#1170): Delete the block below when we want to remove support for ${} variable interpolation: + deprecatedStringToReplace := fmt.Sprintf("${%s}", k) + if (strings.Count(in, deprecatedStringToReplace) == 1) && len(in) == len(deprecatedStringToReplace) { + return v + } + } + + // Otherwise return a size-1 array containing the input string with standard stringReplacements applied. + return []string{ApplyReplacements(in, stringReplacements)} +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_defaults.go index f8b2a3e99..6129ad586 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_defaults.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_defaults.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors. +Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -36,4 +36,13 @@ func (ts *TaskSpec) SetDefaults(ctx context.Context) { } } } + if ts.Inputs != nil { + ts.Inputs.SetDefaults(ctx) + } +} + +func (inputs *Inputs) SetDefaults(ctx context.Context) { + for i := range inputs.Params { + inputs.Params[i].SetDefaults(ctx) + } } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_interface.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_interface.go index 963ceb69a..da68ad261 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_interface.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_interface.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors. +Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_types.go index 00a0dc874..f3abd895d 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_types.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors. +Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,9 +18,8 @@ package v1alpha1 import ( corev1 "k8s.io/api/core/v1" - - "github.com/knative/pkg/apis" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" ) func (t *Task) TaskSpec() TaskSpec { @@ -48,7 +47,7 @@ type TaskSpec struct { // Steps are the steps of the build; each step is run sequentially with the // source mounted into /workspace. 
- Steps []corev1.Container `json:"steps,omitempty"` + Steps []Step `json:"steps,omitempty"` // Volumes is a collection of volumes that are available to mount into the // steps of the build. @@ -57,10 +56,12 @@ type TaskSpec struct { // StepTemplate can be used as the basis for all step containers within the // Task, so that the steps inherit settings on the base container. StepTemplate *corev1.Container `json:"stepTemplate,omitempty"` +} - // ContainerTemplate is the deprecated previous name of the StepTemplate - // field (#977). - ContainerTemplate *corev1.Container `json:"containerTemplate,omitempty"` +// Step embeds the Container type, which allows it to include fields not +// provided by Container. +type Step struct { + corev1.Container } // Check that Task may be validated and defaulted. diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_validation.go index 8e9a3afea..638336b53 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_validation.go @@ -21,12 +21,10 @@ import ( "fmt" "strings" - "github.com/knative/pkg/apis" - "github.com/tektoncd/pipeline/pkg/merge" - "github.com/tektoncd/pipeline/pkg/templating" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/util/validation" + "knative.dev/pkg/apis" ) func (t *Task) Validate(ctx context.Context) *apis.FieldError { @@ -47,7 +45,7 @@ func (ts *TaskSpec) Validate(ctx context.Context) *apis.FieldError { if err := ValidateVolumes(ts.Volumes).ViaField("volumes"); err != nil { return err } - mergedSteps, err := merge.CombineStepsWithStepTemplate(ts.StepTemplate, ts.Steps) + mergedSteps, err := MergeStepsWithStepTemplate(ts.StepTemplate, ts.Steps) if err != nil { return &apis.FieldError{ Message: fmt.Sprintf("error merging step template and steps: %s", 
err), @@ -55,15 +53,6 @@ func (ts *TaskSpec) Validate(ctx context.Context) *apis.FieldError { } } - // The ContainerTemplate field is deprecated (#977) - mergedSteps, err = merge.CombineStepsWithStepTemplate(ts.ContainerTemplate, mergedSteps) - if err != nil { - return &apis.FieldError{ - Message: fmt.Sprintf("error merging containerTemplate and steps: %s", err), - Paths: []string{"stepTemplate"}, - } - } - if err := validateSteps(mergedSteps).ViaField("steps"); err != nil { return err } @@ -80,6 +69,9 @@ func (ts *TaskSpec) Validate(ctx context.Context) *apis.FieldError { if err := checkForDuplicates(ts.Inputs.Resources, "taskspec.Inputs.Resources.Name"); err != nil { return err } + if err := validateInputParameterTypes(ts.Inputs); err != nil { + return err + } } if ts.Outputs != nil { for _, resource := range ts.Outputs.Resources { @@ -94,7 +86,7 @@ func (ts *TaskSpec) Validate(ctx context.Context) *apis.FieldError { // Validate task step names for _, step := range ts.Steps { - if errs := validation.IsDNS1123Label(step.Name); len(errs) > 0 { + if errs := validation.IsDNS1123Label(step.Name); step.Name != "" && len(errs) > 0 { return &apis.FieldError{ Message: fmt.Sprintf("invalid value %q", step.Name), Paths: []string{"taskspec.steps.name"}, @@ -124,7 +116,7 @@ func ValidateVolumes(volumes []corev1.Volume) *apis.FieldError { return nil } -func validateSteps(steps []corev1.Container) *apis.FieldError { +func validateSteps(steps []Step) *apis.FieldError { // Task must not have duplicate step names. names := map[string]struct{}{} for _, s := range steps { @@ -143,17 +135,54 @@ func validateSteps(steps []corev1.Container) *apis.FieldError { return nil } -func validateInputParameterVariables(steps []corev1.Container, inputs *Inputs) *apis.FieldError { +func validateInputParameterTypes(inputs *Inputs) *apis.FieldError { + for _, p := range inputs.Params { + // Ensure param has a valid type. 
+ validType := false + for _, allowedType := range AllParamTypes { + if p.Type == allowedType { + validType = true + } + } + if !validType { + return apis.ErrInvalidValue(p.Type, fmt.Sprintf("taskspec.inputs.params.%s.type", p.Name)) + } + + // If a default value is provided, ensure its type matches param's declared type. + if (p.Default != nil) && (p.Default.Type != p.Type) { + return &apis.FieldError{ + Message: fmt.Sprintf( + "\"%v\" type does not match default value's type: \"%v\"", p.Type, p.Default.Type), + Paths: []string{ + fmt.Sprintf("taskspec.inputs.params.%s.type", p.Name), + fmt.Sprintf("taskspec.inputs.params.%s.default.type", p.Name), + }, + } + } + } + return nil +} + +func validateInputParameterVariables(steps []Step, inputs *Inputs) *apis.FieldError { parameterNames := map[string]struct{}{} + arrayParameterNames := map[string]struct{}{} + if inputs != nil { for _, p := range inputs.Params { parameterNames[p.Name] = struct{}{} + if p.Type == ParamTypeArray { + arrayParameterNames[p.Name] = struct{}{} + } } } - return validateVariables(steps, "params", parameterNames) + + if err := validateVariables(steps, "params", parameterNames); err != nil { + return err + } + return validateArrayUsage(steps, "params", arrayParameterNames) } -func validateResourceVariables(steps []corev1.Container, inputs *Inputs, outputs *Outputs) *apis.FieldError { +func validateResourceVariables(steps []Step, inputs *Inputs, outputs *Outputs) *apis.FieldError { resourceNames := map[string]struct{}{} if inputs != nil { for _, r := range inputs.Resources { @@ -173,7 +202,48 @@ func validateResourceVariables(steps []corev1.Container, inputs *Inputs, outputs return validateVariables(steps, "resources", resourceNames) } -func validateVariables(steps []corev1.Container, prefix string, vars map[string]struct{}) *apis.FieldError { +func validateArrayUsage(steps []Step, prefix string, vars map[string]struct{}) *apis.FieldError { + for _, step := range steps { + if err := 
validateTaskNoArrayReferenced("name", step.Name, prefix, vars); err != nil { + return err + } + if err := validateTaskNoArrayReferenced("image", step.Image, prefix, vars); err != nil { + return err + } + if err := validateTaskNoArrayReferenced("workingDir", step.WorkingDir, prefix, vars); err != nil { + return err + } + for i, cmd := range step.Command { + if err := validateTaskArraysIsolated(fmt.Sprintf("command[%d]", i), cmd, prefix, vars); err != nil { + return err + } + } + for i, arg := range step.Args { + if err := validateTaskArraysIsolated(fmt.Sprintf("arg[%d]", i), arg, prefix, vars); err != nil { + return err + } + } + for _, env := range step.Env { + if err := validateTaskNoArrayReferenced(fmt.Sprintf("env[%s]", env.Name), env.Value, prefix, vars); err != nil { + return err + } + } + for i, v := range step.VolumeMounts { + if err := validateTaskNoArrayReferenced(fmt.Sprintf("volumeMount[%d].Name", i), v.Name, prefix, vars); err != nil { + return err + } + if err := validateTaskNoArrayReferenced(fmt.Sprintf("volumeMount[%d].MountPath", i), v.MountPath, prefix, vars); err != nil { + return err + } + if err := validateTaskNoArrayReferenced(fmt.Sprintf("volumeMount[%d].SubPath", i), v.SubPath, prefix, vars); err != nil { + return err + } + } + } + return nil +} + +func validateVariables(steps []Step, prefix string, vars map[string]struct{}) *apis.FieldError { for _, step := range steps { if err := validateTaskVariable("name", step.Name, prefix, vars); err != nil { return err @@ -215,7 +285,15 @@ func validateVariables(steps []corev1.Container, prefix string, vars map[string] } func validateTaskVariable(name, value, prefix string, vars map[string]struct{}) *apis.FieldError { - return templating.ValidateVariable(name, value, prefix, "(?:inputs|outputs).", "step", "taskspec.steps", vars) + return ValidateVariable(name, value, prefix, "(?:inputs|outputs).", "step", "taskspec.steps", vars) +} + +func validateTaskNoArrayReferenced(name, value, prefix string, 
arrayNames map[string]struct{}) *apis.FieldError { + return ValidateVariableProhibited(name, value, prefix, "(?:inputs|outputs).", "step", "taskspec.steps", arrayNames) +} + +func validateTaskArraysIsolated(name, value, prefix string, arrayNames map[string]struct{}) *apis.FieldError { + return ValidateVariableIsolated(name, value, prefix, "(?:inputs|outputs).", "step", "taskspec.steps", arrayNames) } func checkForDuplicates(resources []TaskResource, path string) *apis.FieldError { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_defaults.go index e64c06462..ee92487ef 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_defaults.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_defaults.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors. +Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -29,11 +29,21 @@ func (tr *TaskRun) SetDefaults(ctx context.Context) { } func (trs *TaskRunSpec) SetDefaults(ctx context.Context) { + cfg := config.FromContextOrDefaults(ctx) if trs.TaskRef != nil && trs.TaskRef.Kind == "" { trs.TaskRef.Kind = NamespacedTaskKind } - cfg := config.FromContextOrDefaults(ctx) + if trs.Timeout == nil { - trs.Timeout = &metav1.Duration{Duration: time.Duration(cfg.Defaults.DefaultTimeoutMinutes) * time.Minute} + var timeout *metav1.Duration + if IsUpgradeViaDefaulting(ctx) { + // This case is for preexisting `TaskRun` before 0.5.0, so let's + // add the old default timeout. 
+ // Most likely those TaskRun passing here are already done and/or already running + timeout = &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute} + } else { + timeout = &metav1.Duration{Duration: time.Duration(cfg.Defaults.DefaultTimeoutMinutes) * time.Minute} + } + trs.Timeout = timeout } } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_types.go index 4f4968bdb..4561eda86 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_types.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors. +Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -20,10 +20,10 @@ import ( "fmt" "time" - "github.com/knative/pkg/apis" - duckv1beta1 "github.com/knative/pkg/apis/duck/v1beta1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" + duckv1beta1 "knative.dev/pkg/apis/duck/v1beta1" ) // Check that TaskRun may be validated and defaulted. @@ -53,6 +53,11 @@ type TaskRunSpec struct { // Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration // +optional Timeout *metav1.Duration `json:"timeout,omitempty"` + + // PodTemplate holds pod specific configuration + PodTemplate PodTemplate `json:"podTemplate,omitempty"` + + // FIXME(vdemeester) Deprecated // NodeSelector is a selector which must be true for the pod to fit on a node. // Selector which must match a node's labels for the pod to be scheduled on that node. // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ @@ -128,6 +133,12 @@ type TaskRunStatus struct { // Steps describes the state of each build step container. 
// +optional Steps []StepState `json:"steps,omitempty"` + + // CloudEvents describe the state of each cloud event requested via a + // CloudEventResource. + // +optional + CloudEvents []CloudEventDelivery `json:"cloudEvents,omitempty"` + // RetriesStatus contains the history of TaskRunStatus in case of a retry in order to keep record of failures. // All TaskRunStatus stored in RetriesStatus will have no date within the RetriesStatus as is redundant. // +optional @@ -160,10 +171,67 @@ func (tr *TaskRunStatus) SetCondition(newCond *apis.Condition) { } } +// InitializeCloudEvents initializes the CloudEvents part of the TaskRunStatus +// from a list of event targets +func (tr *TaskRunStatus) InitializeCloudEvents(targets []string) { + // len(nil slice) is 0 + if len(targets) > 0 { + initialState := CloudEventDeliveryState{ + Condition: CloudEventConditionUnknown, + RetryCount: 0, + } + events := make([]CloudEventDelivery, len(targets)) + for idx, target := range targets { + events[idx] = CloudEventDelivery{ + Target: target, + Status: initialState, + } + } + tr.CloudEvents = events + } +} + // StepState reports the results of running a step in the Task. type StepState struct { corev1.ContainerState - Name string `json:"name,omitempty"` + Name string `json:"name,omitempty"` + ContainerName string `json:"container,omitempty"` + ImageID string `json:"imageID,omitempty"` +} + +// CloudEventDelivery is the target of a cloud event along with the state of +// delivery. +type CloudEventDelivery struct { + // Target points to an addressable + Target string `json:"target,omitempty"` + Status CloudEventDeliveryState `json:"status,omitempty"` +} + +// CloudEventCondition is a string that represents the condition of the event. +type CloudEventCondition string + +const ( + // CloudEventConditionUnknown means that the condition for the event to be + // triggered was not met yet, or we don't know the state yet. 
+ CloudEventConditionUnknown CloudEventCondition = "Unknown" + // CloudEventConditionSent means that the event was sent successfully + CloudEventConditionSent CloudEventCondition = "Sent" + // CloudEventConditionFailed means that there was one or more attempts to + // send the event, and none was successful so far. + CloudEventConditionFailed CloudEventCondition = "Failed" +) + +// CloudEventDeliveryState reports the state of a cloud event to be sent. +type CloudEventDeliveryState struct { + // Current status + Condition CloudEventCondition `json:"condition,omitempty"` + // SentAt is the time at which the last attempt to send the event was made + // +optional + SentAt *metav1.Time `json:"sentAt,omitempty"` + // Error is the text of error (if any) + Error string `json:"message"` + // RetryCount is the number of attempts of sending the cloud event + RetryCount int32 `json:"retryCount"` } // +genclient diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_validation.go index 08cc6a2cb..f0cef3117 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_validation.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors. +Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -21,8 +21,8 @@ import ( "fmt" "strings" - "github.com/knative/pkg/apis" "k8s.io/apimachinery/pkg/api/equality" + "knative.dev/pkg/apis" ) // Validate taskrun @@ -49,6 +49,13 @@ func (ts *TaskRunSpec) Validate(ctx context.Context) *apis.FieldError { return apis.ErrMissingField("spec.taskref.name", "spec.taskspec") } + // Validate TaskSpec if it's present + if ts.TaskSpec != nil { + if err := ts.TaskSpec.Validate(ctx); err != nil { + return err + } + } + // check for input resources if err := ts.Inputs.Validate(ctx, "spec.Inputs"); err != nil { return err @@ -66,6 +73,13 @@ func (ts *TaskRunSpec) Validate(ctx context.Context) *apis.FieldError { } } + if ts.Timeout != nil { + // timeout should be a valid duration of at least 0. + if ts.Timeout.Duration < 0 { + return apis.ErrInvalidValue(fmt.Sprintf("%s should be >= 0", ts.Timeout.Duration.String()), "spec.timeout") + } + } + return nil } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go index d95226a45..031f008d6 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go @@ -16,16 +16,37 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file was autogenerated by deepcopy-gen. Do not edit it manually! +// Code generated by deepcopy-gen. DO NOT EDIT. package v1alpha1 import ( v1 "k8s.io/api/core/v1" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ArrayOrString) DeepCopyInto(out *ArrayOrString) { + *out = *in + if in.ArrayVal != nil { + in, out := &in.ArrayVal, &out.ArrayVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArrayOrString. +func (in *ArrayOrString) DeepCopy() *ArrayOrString { + if in == nil { + return nil + } + out := new(ArrayOrString) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ArtifactBucket) DeepCopyInto(out *ArtifactBucket) { *out = *in @@ -52,12 +73,8 @@ func (in *ArtifactPVC) DeepCopyInto(out *ArtifactPVC) { *out = *in if in.PersistentVolumeClaim != nil { in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim - if *in == nil { - *out = nil - } else { - *out = new(v1.PersistentVolumeClaim) - (*in).DeepCopyInto(*out) - } + *out = new(v1.PersistentVolumeClaim) + (*in).DeepCopyInto(*out) } return } @@ -88,6 +105,59 @@ func (in *BuildGCSResource) DeepCopy() *BuildGCSResource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudEventDelivery) DeepCopyInto(out *CloudEventDelivery) { + *out = *in + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudEventDelivery. +func (in *CloudEventDelivery) DeepCopy() *CloudEventDelivery { + if in == nil { + return nil + } + out := new(CloudEventDelivery) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudEventDeliveryState) DeepCopyInto(out *CloudEventDeliveryState) { + *out = *in + if in.SentAt != nil { + in, out := &in.SentAt, &out.SentAt + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudEventDeliveryState. +func (in *CloudEventDeliveryState) DeepCopy() *CloudEventDeliveryState { + if in == nil { + return nil + } + out := new(CloudEventDeliveryState) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudEventResource) DeepCopyInto(out *CloudEventResource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudEventResource. +func (in *CloudEventResource) DeepCopy() *CloudEventResource { + if in == nil { + return nil + } + out := new(CloudEventResource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterResource) DeepCopyInto(out *ClusterResource) { *out = *in @@ -174,6 +244,136 @@ func (in *ClusterTaskList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Condition) DeepCopyInto(out *Condition) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition. +func (in *Condition) DeepCopy() *Condition { + if in == nil { + return nil + } + out := new(Condition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Condition) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionCheck) DeepCopyInto(out *ConditionCheck) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionCheck. +func (in *ConditionCheck) DeepCopy() *ConditionCheck { + if in == nil { + return nil + } + out := new(ConditionCheck) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionCheckStatus) DeepCopyInto(out *ConditionCheckStatus) { + *out = *in + in.Status.DeepCopyInto(&out.Status) + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = (*in).DeepCopy() + } + if in.CompletionTime != nil { + in, out := &in.CompletionTime, &out.CompletionTime + *out = (*in).DeepCopy() + } + in.Check.DeepCopyInto(&out.Check) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionCheckStatus. +func (in *ConditionCheckStatus) DeepCopy() *ConditionCheckStatus { + if in == nil { + return nil + } + out := new(ConditionCheckStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionList) DeepCopyInto(out *ConditionList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionList. +func (in *ConditionList) DeepCopy() *ConditionList { + if in == nil { + return nil + } + out := new(ConditionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConditionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionSpec) DeepCopyInto(out *ConditionSpec) { + *out = *in + in.Check.DeepCopyInto(&out.Check) + if in.Params != nil { + in, out := &in.Params, &out.Params + *out = make([]ParamSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionSpec. +func (in *ConditionSpec) DeepCopy() *ConditionSpec { + if in == nil { + return nil + } + out := new(ConditionSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *DAG) DeepCopyInto(out *DAG) { *out = *in @@ -181,12 +381,15 @@ func (in *DAG) DeepCopyInto(out *DAG) { in, out := &in.Nodes, &out.Nodes *out = make(map[string]*Node, len(*in)) for key, val := range *in { + var outVal *Node if val == nil { (*out)[key] = nil } else { - (*out)[key] = new(Node) - val.DeepCopyInto((*out)[key]) + in, out := &val, &outVal + *out = new(Node) + (*in).DeepCopyInto(*out) } + (*out)[key] = outVal } } return @@ -266,7 +469,9 @@ func (in *Inputs) DeepCopyInto(out *Inputs) { if in.Params != nil { in, out := &in.Params, &out.Params *out = make([]ParamSpec, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } return } @@ -289,11 +494,10 @@ func (in *Node) DeepCopyInto(out *Node) { in, out := &in.Prev, &out.Prev *out = make([]*Node, len(*in)) for i := range *in { - if (*in)[i] == nil { - (*out)[i] = nil - } else { - (*out)[i] = new(Node) - (*in)[i].DeepCopyInto((*out)[i]) + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Node) + (*in).DeepCopyInto(*out) } } } @@ -301,11 +505,10 @@ func (in *Node) DeepCopyInto(out *Node) { in, out := &in.Next, &out.Next *out = make([]*Node, len(*in)) for i := range *in { - if (*in)[i] == nil { - (*out)[i] = nil - } else { - (*out)[i] = new(Node) - (*in)[i].DeepCopyInto((*out)[i]) + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Node) + (*in).DeepCopyInto(*out) } } } @@ -351,6 +554,7 @@ func (in *Outputs) DeepCopy() *Outputs { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Param) DeepCopyInto(out *Param) { *out = *in + in.Value.DeepCopyInto(&out.Value) return } @@ -367,6 +571,11 @@ func (in *Param) DeepCopy() *Param { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ParamSpec) DeepCopyInto(out *ParamSpec) { *out = *in + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = new(ArrayOrString) + (*in).DeepCopyInto(*out) + } return } @@ -588,7 +797,7 @@ func (in *PipelineResourceSpec) DeepCopyInto(out *PipelineResourceSpec) { *out = *in if in.Params != nil { in, out := &in.Params, &out.Params - *out = make([]Param, len(*in)) + *out = make([]ResourceParam, len(*in)) copy(*out, *in) } if in.SecretParams != nil { @@ -653,6 +862,27 @@ func (in *PipelineRun) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipelineRunConditionCheckStatus) DeepCopyInto(out *PipelineRunConditionCheckStatus) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(ConditionCheckStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRunConditionCheckStatus. +func (in *PipelineRunConditionCheckStatus) DeepCopy() *PipelineRunConditionCheckStatus { + if in == nil { + return nil + } + out := new(PipelineRunConditionCheckStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PipelineRunList) DeepCopyInto(out *PipelineRunList) { *out = *in @@ -698,7 +928,9 @@ func (in *PipelineRunSpec) DeepCopyInto(out *PipelineRunSpec) { if in.Params != nil { in, out := &in.Params, &out.Params *out = make([]Param, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } if in.ServiceAccounts != nil { in, out := &in.ServiceAccounts, &out.ServiceAccounts @@ -707,22 +939,15 @@ func (in *PipelineRunSpec) DeepCopyInto(out *PipelineRunSpec) { } if in.Results != nil { in, out := &in.Results, &out.Results - if *in == nil { - *out = nil - } else { - *out = new(Results) - **out = **in - } + *out = new(Results) + **out = **in } if in.Timeout != nil { in, out := &in.Timeout, &out.Timeout - if *in == nil { - *out = nil - } else { - *out = new(meta_v1.Duration) - **out = **in - } + *out = new(metav1.Duration) + **out = **in } + in.PodTemplate.DeepCopyInto(&out.PodTemplate) if in.NodeSelector != nil { in, out := &in.NodeSelector, &out.NodeSelector *out = make(map[string]string, len(*in)) @@ -739,12 +964,8 @@ func (in *PipelineRunSpec) DeepCopyInto(out *PipelineRunSpec) { } if in.Affinity != nil { in, out := &in.Affinity, &out.Affinity - if *in == nil { - *out = nil - } else { - *out = new(v1.Affinity) - (*in).DeepCopyInto(*out) - } + *out = new(v1.Affinity) + (*in).DeepCopyInto(*out) } return } @@ -781,41 +1002,30 @@ func (in *PipelineRunStatus) DeepCopyInto(out *PipelineRunStatus) { in.Status.DeepCopyInto(&out.Status) if in.Results != nil { in, out := &in.Results, &out.Results - if *in == nil { - *out = nil - } else { - *out = new(Results) - **out = **in - } + *out = new(Results) + **out = **in } if in.StartTime != nil { in, out := &in.StartTime, &out.StartTime - if *in == nil { - *out = nil - } else { - *out = new(meta_v1.Time) - (*in).DeepCopyInto(*out) - } + *out = (*in).DeepCopy() } if in.CompletionTime != nil { in, out := &in.CompletionTime, &out.CompletionTime - if *in == nil { - *out = nil - } else { - *out = 
new(meta_v1.Time) - (*in).DeepCopyInto(*out) - } + *out = (*in).DeepCopy() } if in.TaskRuns != nil { in, out := &in.TaskRuns, &out.TaskRuns *out = make(map[string]*PipelineRunTaskRunStatus, len(*in)) for key, val := range *in { + var outVal *PipelineRunTaskRunStatus if val == nil { (*out)[key] = nil } else { - (*out)[key] = new(PipelineRunTaskRunStatus) - val.DeepCopyInto((*out)[key]) + in, out := &val, &outVal + *out = new(PipelineRunTaskRunStatus) + (*in).DeepCopyInto(*out) } + (*out)[key] = outVal } } return @@ -836,11 +1046,22 @@ func (in *PipelineRunTaskRunStatus) DeepCopyInto(out *PipelineRunTaskRunStatus) *out = *in if in.Status != nil { in, out := &in.Status, &out.Status - if *in == nil { - *out = nil - } else { - *out = new(TaskRunStatus) - (*in).DeepCopyInto(*out) + *out = new(TaskRunStatus) + (*in).DeepCopyInto(*out) + } + if in.ConditionChecks != nil { + in, out := &in.ConditionChecks, &out.ConditionChecks + *out = make(map[string]*PipelineRunConditionCheckStatus, len(*in)) + for key, val := range *in { + var outVal *PipelineRunConditionCheckStatus + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(PipelineRunConditionCheckStatus) + (*in).DeepCopyInto(*out) + } + (*out)[key] = outVal } } return @@ -874,7 +1095,9 @@ func (in *PipelineSpec) DeepCopyInto(out *PipelineSpec) { if in.Params != nil { in, out := &in.Params, &out.Params *out = make([]ParamSpec, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } return } @@ -909,6 +1132,13 @@ func (in *PipelineStatus) DeepCopy() *PipelineStatus { func (in *PipelineTask) DeepCopyInto(out *PipelineTask) { *out = *in out.TaskRef = in.TaskRef + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]PipelineTaskCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.RunAfter != nil { in, out := &in.RunAfter, &out.RunAfter *out = make([]string, len(*in)) @@ -916,17 
+1146,15 @@ func (in *PipelineTask) DeepCopyInto(out *PipelineTask) { } if in.Resources != nil { in, out := &in.Resources, &out.Resources - if *in == nil { - *out = nil - } else { - *out = new(PipelineTaskResources) - (*in).DeepCopyInto(*out) - } + *out = new(PipelineTaskResources) + (*in).DeepCopyInto(*out) } if in.Params != nil { in, out := &in.Params, &out.Params *out = make([]Param, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } return } @@ -941,6 +1169,29 @@ func (in *PipelineTask) DeepCopy() *PipelineTask { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipelineTaskCondition) DeepCopyInto(out *PipelineTaskCondition) { + *out = *in + if in.Params != nil { + in, out := &in.Params, &out.Params + *out = make([]Param, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineTaskCondition. +func (in *PipelineTaskCondition) DeepCopy() *PipelineTaskCondition { + if in == nil { + return nil + } + out := new(PipelineTaskCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PipelineTaskInputResource) DeepCopyInto(out *PipelineTaskInputResource) { *out = *in @@ -1038,6 +1289,53 @@ func (in *PipelineTaskRun) DeepCopy() *PipelineTaskRun { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodTemplate) DeepCopyInto(out *PodTemplate) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(v1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(v1.PodSecurityContext) + (*in).DeepCopyInto(*out) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]v1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplate. +func (in *PodTemplate) DeepCopy() *PodTemplate { + if in == nil { + return nil + } + out := new(PodTemplate) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PullRequestResource) DeepCopyInto(out *PullRequestResource) { *out = *in @@ -1059,6 +1357,22 @@ func (in *PullRequestResource) DeepCopy() *PullRequestResource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceParam) DeepCopyInto(out *ResourceParam) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceParam. 
+func (in *ResourceParam) DeepCopy() *ResourceParam { + if in == nil { + return nil + } + out := new(ResourceParam) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Results) DeepCopyInto(out *Results) { *out = *in @@ -1091,6 +1405,23 @@ func (in *SecretParam) DeepCopy() *SecretParam { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Step) DeepCopyInto(out *Step) { + *out = *in + in.Container.DeepCopyInto(&out.Container) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Step. +func (in *Step) DeepCopy() *Step { + if in == nil { + return nil + } + out := new(Step) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StepState) DeepCopyInto(out *StepState) { *out = *in @@ -1206,12 +1537,8 @@ func (in *TaskResourceBinding) DeepCopyInto(out *TaskResourceBinding) { out.ResourceRef = in.ResourceRef if in.ResourceSpec != nil { in, out := &in.ResourceSpec, &out.ResourceSpec - if *in == nil { - *out = nil - } else { - *out = new(PipelineResourceSpec) - (*in).DeepCopyInto(*out) - } + *out = new(PipelineResourceSpec) + (*in).DeepCopyInto(*out) } if in.Paths != nil { in, out := &in.Paths, &out.Paths @@ -1272,7 +1599,9 @@ func (in *TaskRunInputs) DeepCopyInto(out *TaskRunInputs) { if in.Params != nil { in, out := &in.Params, &out.Params *out = make([]Param, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } return } @@ -1350,40 +1679,25 @@ func (in *TaskRunSpec) DeepCopyInto(out *TaskRunSpec) { in.Outputs.DeepCopyInto(&out.Outputs) if in.Results != nil { in, out := &in.Results, &out.Results - if *in == nil { - *out = nil - } else { - *out = new(Results) - **out = **in - 
} + *out = new(Results) + **out = **in } if in.TaskRef != nil { in, out := &in.TaskRef, &out.TaskRef - if *in == nil { - *out = nil - } else { - *out = new(TaskRef) - **out = **in - } + *out = new(TaskRef) + **out = **in } if in.TaskSpec != nil { in, out := &in.TaskSpec, &out.TaskSpec - if *in == nil { - *out = nil - } else { - *out = new(TaskSpec) - (*in).DeepCopyInto(*out) - } + *out = new(TaskSpec) + (*in).DeepCopyInto(*out) } if in.Timeout != nil { in, out := &in.Timeout, &out.Timeout - if *in == nil { - *out = nil - } else { - *out = new(meta_v1.Duration) - **out = **in - } + *out = new(metav1.Duration) + **out = **in } + in.PodTemplate.DeepCopyInto(&out.PodTemplate) if in.NodeSelector != nil { in, out := &in.NodeSelector, &out.NodeSelector *out = make(map[string]string, len(*in)) @@ -1400,12 +1714,8 @@ func (in *TaskRunSpec) DeepCopyInto(out *TaskRunSpec) { } if in.Affinity != nil { in, out := &in.Affinity, &out.Affinity - if *in == nil { - *out = nil - } else { - *out = new(v1.Affinity) - (*in).DeepCopyInto(*out) - } + *out = new(v1.Affinity) + (*in).DeepCopyInto(*out) } return } @@ -1426,30 +1736,16 @@ func (in *TaskRunStatus) DeepCopyInto(out *TaskRunStatus) { in.Status.DeepCopyInto(&out.Status) if in.Results != nil { in, out := &in.Results, &out.Results - if *in == nil { - *out = nil - } else { - *out = new(Results) - **out = **in - } + *out = new(Results) + **out = **in } if in.StartTime != nil { in, out := &in.StartTime, &out.StartTime - if *in == nil { - *out = nil - } else { - *out = new(meta_v1.Time) - (*in).DeepCopyInto(*out) - } + *out = (*in).DeepCopy() } if in.CompletionTime != nil { in, out := &in.CompletionTime, &out.CompletionTime - if *in == nil { - *out = nil - } else { - *out = new(meta_v1.Time) - (*in).DeepCopyInto(*out) - } + *out = (*in).DeepCopy() } if in.Steps != nil { in, out := &in.Steps, &out.Steps @@ -1458,6 +1754,13 @@ func (in *TaskRunStatus) DeepCopyInto(out *TaskRunStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if 
in.CloudEvents != nil { + in, out := &in.CloudEvents, &out.CloudEvents + *out = make([]CloudEventDelivery, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.RetriesStatus != nil { in, out := &in.RetriesStatus, &out.RetriesStatus *out = make([]TaskRunStatus, len(*in)) @@ -1488,25 +1791,17 @@ func (in *TaskSpec) DeepCopyInto(out *TaskSpec) { *out = *in if in.Inputs != nil { in, out := &in.Inputs, &out.Inputs - if *in == nil { - *out = nil - } else { - *out = new(Inputs) - (*in).DeepCopyInto(*out) - } + *out = new(Inputs) + (*in).DeepCopyInto(*out) } if in.Outputs != nil { in, out := &in.Outputs, &out.Outputs - if *in == nil { - *out = nil - } else { - *out = new(Outputs) - (*in).DeepCopyInto(*out) - } + *out = new(Outputs) + (*in).DeepCopyInto(*out) } if in.Steps != nil { in, out := &in.Steps, &out.Steps - *out = make([]v1.Container, len(*in)) + *out = make([]Step, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1520,21 +1815,8 @@ func (in *TaskSpec) DeepCopyInto(out *TaskSpec) { } if in.StepTemplate != nil { in, out := &in.StepTemplate, &out.StepTemplate - if *in == nil { - *out = nil - } else { - *out = new(v1.Container) - (*in).DeepCopyInto(*out) - } - } - if in.ContainerTemplate != nil { - in, out := &in.ContainerTemplate, &out.ContainerTemplate - if *in == nil { - *out = nil - } else { - *out = new(v1.Container) - (*in).DeepCopyInto(*out) - } + *out = new(v1.Container) + (*in).DeepCopyInto(*out) } return } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/artifacts/artifacts_storage.go b/vendor/github.com/tektoncd/pipeline/pkg/artifacts/artifacts_storage.go index a5d299c21..0d149ddc2 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/artifacts/artifacts_storage.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/artifacts/artifacts_storage.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors. 
+Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -41,13 +41,16 @@ const ( // DefaultPvcSize is the default size of the PVC to create DefaultPvcSize = "5Gi" + + // PvcStorageClassNameKey is the name of the configmap entry that specifies the storage class of the PVC to create + PvcStorageClassNameKey = "storageClassName" ) // ArtifactStorageInterface is an interface to define the steps to copy // an pipeline artifact to/from temporary storage type ArtifactStorageInterface interface { - GetCopyToStorageFromContainerSpec(name, sourcePath, destinationPath string) []corev1.Container - GetCopyFromStorageToContainerSpec(name, sourcePath, destinationPath string) []corev1.Container + GetCopyToStorageFromSteps(name, sourcePath, destinationPath string) []v1alpha1.Step + GetCopyFromStorageToSteps(name, sourcePath, destinationPath string) []v1alpha1.Step GetSecretsVolumes() []corev1.Volume GetType() string StorageBasePath(pr *v1alpha1.PipelineRun) string @@ -163,8 +166,10 @@ func createPVC(pr *v1alpha1.PipelineRun, c kubernetes.Interface) (*corev1.Persis return nil, xerrors.Errorf("failed to get PVC ConfigMap %s for %q due to error: %w", PvcConfigName, pr.Name, err) } var pvcSizeStr string + var pvcStorageClassNameStr string if configMap != nil { pvcSizeStr = configMap.Data[PvcSizeKey] + pvcStorageClassNameStr = configMap.Data[PvcStorageClassNameKey] } if pvcSizeStr == "" { pvcSizeStr = DefaultPvcSize @@ -173,7 +178,14 @@ func createPVC(pr *v1alpha1.PipelineRun, c kubernetes.Interface) (*corev1.Persis if err != nil { return nil, xerrors.Errorf("failed to create Persistent Volume spec for %q due to error: %w", pr.Name, err) } - pvcSpec := GetPVCSpec(pr, pvcSize) + var pvcStorageClassName *string + if pvcStorageClassNameStr == "" { + pvcStorageClassName = nil + } else { + pvcStorageClassName = &pvcStorageClassNameStr + } + + pvcSpec := GetPVCSpec(pr, pvcSize, 
pvcStorageClassName) pvc, err := c.CoreV1().PersistentVolumeClaims(pr.Namespace).Create(pvcSpec) if err != nil { return nil, xerrors.Errorf("failed to claim Persistent Volume %q due to error: %w", pr.Name, err) @@ -197,7 +209,7 @@ func deletePVC(pr *v1alpha1.PipelineRun, c kubernetes.Interface) error { } // GetPVCSpec returns the PVC to create for a given PipelineRun -func GetPVCSpec(pr *v1alpha1.PipelineRun, pvcSize resource.Quantity) *corev1.PersistentVolumeClaim { +func GetPVCSpec(pr *v1alpha1.PipelineRun, pvcSize resource.Quantity, storageClassName *string) *corev1.PersistentVolumeClaim { return &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Namespace: pr.Namespace, @@ -211,6 +223,7 @@ func GetPVCSpec(pr *v1alpha1.PipelineRun, pvcSize resource.Quantity) *corev1.Per corev1.ResourceStorage: pvcSize, }, }, + StorageClassName: storageClassName, }, } } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/clientset.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/clientset.go index 49eeb627e..4f2e5dbfd 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/clientset.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/clientset.go @@ -13,6 +13,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by client-gen. DO NOT EDIT. + package versioned import ( diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/doc.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/doc.go index d5f19d2cd..c166b8dc1 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/doc.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/doc.go @@ -13,5 +13,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by client-gen. DO NOT EDIT. + // This package has the automatically generated clientset. package versioned diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake/clientset_generated.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake/clientset_generated.go index 6c34b2789..44ca72357 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake/clientset_generated.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -13,6 +13,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by client-gen. DO NOT EDIT. + package fake import ( diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake/doc.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake/doc.go index e09c93167..e8b193cf9 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake/doc.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake/doc.go @@ -13,5 +13,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by client-gen. DO NOT EDIT. + // This package has the automatically generated fake clientset. 
package fake diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake/register.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake/register.go index c72d7a68c..ee42d3210 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake/register.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake/register.go @@ -13,6 +13,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by client-gen. DO NOT EDIT. + package fake import ( @@ -21,7 +24,7 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" serializer "k8s.io/apimachinery/pkg/runtime/serializer" - util_runtime "k8s.io/apimachinery/pkg/util/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" ) var scheme = runtime.NewScheme() @@ -49,5 +52,5 @@ var AddToScheme = localSchemeBuilder.AddToScheme func init() { v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) - util_runtime.Must(AddToScheme(scheme)) + utilruntime.Must(AddToScheme(scheme)) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/doc.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/doc.go index 11014abe5..c543eeb57 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/doc.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/doc.go @@ -13,5 +13,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by client-gen. DO NOT EDIT. + // This package contains the scheme of the automatically generated clientset. 
package scheme diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/register.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/register.go index ec4be41a6..b59db5f18 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/register.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/register.go @@ -13,6 +13,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by client-gen. DO NOT EDIT. + package scheme import ( @@ -21,7 +24,7 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" serializer "k8s.io/apimachinery/pkg/runtime/serializer" - util_runtime "k8s.io/apimachinery/pkg/util/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" ) var Scheme = runtime.NewScheme() @@ -49,5 +52,5 @@ var AddToScheme = localSchemeBuilder.AddToScheme func init() { v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) - util_runtime.Must(AddToScheme(Scheme)) + utilruntime.Must(AddToScheme(Scheme)) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/clustertask.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/clustertask.go index 5254440da..b92785315 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/clustertask.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/clustertask.go @@ -13,6 +13,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by client-gen. DO NOT EDIT. 
+ package v1alpha1 import ( diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/condition.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/condition.go new file mode 100644 index 000000000..0ff7825b1 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/condition.go @@ -0,0 +1,157 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ConditionsGetter has a method to return a ConditionInterface. +// A group's client should implement this interface. +type ConditionsGetter interface { + Conditions(namespace string) ConditionInterface +} + +// ConditionInterface has methods to work with Condition resources. 
+type ConditionInterface interface { + Create(*v1alpha1.Condition) (*v1alpha1.Condition, error) + Update(*v1alpha1.Condition) (*v1alpha1.Condition, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.Condition, error) + List(opts v1.ListOptions) (*v1alpha1.ConditionList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Condition, err error) + ConditionExpansion +} + +// conditions implements ConditionInterface +type conditions struct { + client rest.Interface + ns string +} + +// newConditions returns a Conditions +func newConditions(c *TektonV1alpha1Client, namespace string) *conditions { + return &conditions{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the condition, and returns the corresponding condition object, and an error if there is any. +func (c *conditions) Get(name string, options v1.GetOptions) (result *v1alpha1.Condition, err error) { + result = &v1alpha1.Condition{} + err = c.client.Get(). + Namespace(c.ns). + Resource("conditions"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Conditions that match those selectors. +func (c *conditions) List(opts v1.ListOptions) (result *v1alpha1.ConditionList, err error) { + result = &v1alpha1.ConditionList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("conditions"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested conditions. +func (c *conditions) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("conditions"). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a condition and creates it. Returns the server's representation of the condition, and an error, if there is any. +func (c *conditions) Create(condition *v1alpha1.Condition) (result *v1alpha1.Condition, err error) { + result = &v1alpha1.Condition{} + err = c.client.Post(). + Namespace(c.ns). + Resource("conditions"). + Body(condition). + Do(). + Into(result) + return +} + +// Update takes the representation of a condition and updates it. Returns the server's representation of the condition, and an error, if there is any. +func (c *conditions) Update(condition *v1alpha1.Condition) (result *v1alpha1.Condition, err error) { + result = &v1alpha1.Condition{} + err = c.client.Put(). + Namespace(c.ns). + Resource("conditions"). + Name(condition.Name). + Body(condition). + Do(). + Into(result) + return +} + +// Delete takes name of the condition and deletes it. Returns an error if one occurs. +func (c *conditions) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("conditions"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *conditions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("conditions"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched condition. +func (c *conditions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Condition, err error) { + result = &v1alpha1.Condition{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("conditions"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/doc.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/doc.go index 15a6d25fc..8151c2ea6 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/doc.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/doc.go @@ -13,5 +13,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by client-gen. DO NOT EDIT. + // This package has the automatically generated typed clients. package v1alpha1 diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/doc.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/doc.go index 639a5c70c..1ae476cf9 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/doc.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/doc.go @@ -13,5 +13,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by client-gen. DO NOT EDIT. + // Package fake has the automatically generated clients. 
package fake diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_clustertask.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_clustertask.go index cc3e95834..bfb8d0c15 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_clustertask.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_clustertask.go @@ -13,6 +13,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by client-gen. DO NOT EDIT. + package fake import ( diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_condition.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_condition.go new file mode 100644 index 000000000..3032a7f91 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_condition.go @@ -0,0 +1,128 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeConditions implements ConditionInterface +type FakeConditions struct { + Fake *FakeTektonV1alpha1 + ns string +} + +var conditionsResource = schema.GroupVersionResource{Group: "tekton.dev", Version: "v1alpha1", Resource: "conditions"} + +var conditionsKind = schema.GroupVersionKind{Group: "tekton.dev", Version: "v1alpha1", Kind: "Condition"} + +// Get takes name of the condition, and returns the corresponding condition object, and an error if there is any. +func (c *FakeConditions) Get(name string, options v1.GetOptions) (result *v1alpha1.Condition, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(conditionsResource, c.ns, name), &v1alpha1.Condition{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Condition), err +} + +// List takes label and field selectors, and returns the list of Conditions that match those selectors. +func (c *FakeConditions) List(opts v1.ListOptions) (result *v1alpha1.ConditionList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(conditionsResource, conditionsKind, c.ns, opts), &v1alpha1.ConditionList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.ConditionList{ListMeta: obj.(*v1alpha1.ConditionList).ListMeta} + for _, item := range obj.(*v1alpha1.ConditionList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested conditions. 
+func (c *FakeConditions) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(conditionsResource, c.ns, opts)) + +} + +// Create takes the representation of a condition and creates it. Returns the server's representation of the condition, and an error, if there is any. +func (c *FakeConditions) Create(condition *v1alpha1.Condition) (result *v1alpha1.Condition, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(conditionsResource, c.ns, condition), &v1alpha1.Condition{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Condition), err +} + +// Update takes the representation of a condition and updates it. Returns the server's representation of the condition, and an error, if there is any. +func (c *FakeConditions) Update(condition *v1alpha1.Condition) (result *v1alpha1.Condition, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(conditionsResource, c.ns, condition), &v1alpha1.Condition{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Condition), err +} + +// Delete takes name of the condition and deletes it. Returns an error if one occurs. +func (c *FakeConditions) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(conditionsResource, c.ns, name), &v1alpha1.Condition{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeConditions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(conditionsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.ConditionList{}) + return err +} + +// Patch applies the patch and returns the patched condition. +func (c *FakeConditions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Condition, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(conditionsResource, c.ns, name, data, subresources...), &v1alpha1.Condition{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Condition), err +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_pipeline.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_pipeline.go index 58e21a551..4240505a8 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_pipeline.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_pipeline.go @@ -13,6 +13,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by client-gen. DO NOT EDIT. + package fake import ( diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_pipeline_client.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_pipeline_client.go index 551472821..d97828837 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_pipeline_client.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_pipeline_client.go @@ -13,6 +13,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by client-gen. DO NOT EDIT. 
+ package fake import ( @@ -29,6 +32,10 @@ func (c *FakeTektonV1alpha1) ClusterTasks() v1alpha1.ClusterTaskInterface { return &FakeClusterTasks{c} } +func (c *FakeTektonV1alpha1) Conditions(namespace string) v1alpha1.ConditionInterface { + return &FakeConditions{c, namespace} +} + func (c *FakeTektonV1alpha1) Pipelines(namespace string) v1alpha1.PipelineInterface { return &FakePipelines{c, namespace} } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_pipelineresource.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_pipelineresource.go index 6d370d323..894075fdf 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_pipelineresource.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_pipelineresource.go @@ -13,6 +13,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by client-gen. DO NOT EDIT. + package fake import ( diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_pipelinerun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_pipelinerun.go index a408cf054..88fbe36ca 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_pipelinerun.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_pipelinerun.go @@ -13,6 +13,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by client-gen. DO NOT EDIT. 
+ package fake import ( diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_task.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_task.go index 38b16412f..fb00f5862 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_task.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_task.go @@ -13,6 +13,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by client-gen. DO NOT EDIT. + package fake import ( diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_taskrun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_taskrun.go index a9f438b46..dfaaac73b 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_taskrun.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_taskrun.go @@ -13,6 +13,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by client-gen. DO NOT EDIT. 
+ package fake import ( diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/generated_expansion.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/generated_expansion.go index 135b91b84..1270fa3ab 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/generated_expansion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/generated_expansion.go @@ -13,10 +13,15 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by client-gen. DO NOT EDIT. + package v1alpha1 type ClusterTaskExpansion interface{} +type ConditionExpansion interface{} + type PipelineExpansion interface{} type PipelineResourceExpansion interface{} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline.go index 42debecf5..a5292b53f 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline.go @@ -13,6 +13,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by client-gen. DO NOT EDIT. 
+ package v1alpha1 import ( diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline_client.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline_client.go index 1ea047aed..8efca8540 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline_client.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline_client.go @@ -13,6 +13,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by client-gen. DO NOT EDIT. + package v1alpha1 import ( @@ -25,6 +28,7 @@ import ( type TektonV1alpha1Interface interface { RESTClient() rest.Interface ClusterTasksGetter + ConditionsGetter PipelinesGetter PipelineResourcesGetter PipelineRunsGetter @@ -41,6 +45,10 @@ func (c *TektonV1alpha1Client) ClusterTasks() ClusterTaskInterface { return newClusterTasks(c) } +func (c *TektonV1alpha1Client) Conditions(namespace string) ConditionInterface { + return newConditions(c, namespace) +} + func (c *TektonV1alpha1Client) Pipelines(namespace string) PipelineInterface { return newPipelines(c, namespace) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipelineresource.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipelineresource.go index 06d0eb4be..64761fec2 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipelineresource.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipelineresource.go @@ -13,6 +13,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by client-gen. DO NOT EDIT. + package v1alpha1 import ( diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipelinerun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipelinerun.go index 9cc511943..52248eb38 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipelinerun.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipelinerun.go @@ -13,6 +13,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by client-gen. DO NOT EDIT. + package v1alpha1 import ( diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/task.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/task.go index 1ad6131dc..8fd3c348d 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/task.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/task.go @@ -13,6 +13,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by client-gen. DO NOT EDIT. 
+ package v1alpha1 import ( diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/taskrun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/taskrun.go index 1929e4808..48d8774d6 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/taskrun.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/taskrun.go @@ -13,6 +13,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by client-gen. DO NOT EDIT. + package v1alpha1 import ( diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/factory.go b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/factory.go index 8ef470d93..c7e60e595 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/factory.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/factory.go @@ -13,6 +13,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by informer-gen. DO NOT EDIT. + package externalversions import ( diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/generic.go b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/generic.go index 7216523ff..a8f87a563 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/generic.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/generic.go @@ -13,6 +13,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by informer-gen. DO NOT EDIT. + package externalversions import ( @@ -52,6 +55,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource // Group=tekton.dev, Version=v1alpha1 case v1alpha1.SchemeGroupVersion.WithResource("clustertasks"): return &genericInformer{resource: resource.GroupResource(), informer: f.Tekton().V1alpha1().ClusterTasks().Informer()}, nil + case v1alpha1.SchemeGroupVersion.WithResource("conditions"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Tekton().V1alpha1().Conditions().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("pipelines"): return &genericInformer{resource: resource.GroupResource(), informer: f.Tekton().V1alpha1().Pipelines().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("pipelineresources"): diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go index 915d411b5..a5f4a5edc 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -13,6 +13,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by informer-gen. DO NOT EDIT. 
+ package internalinterfaces import ( diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/interface.go b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/interface.go index 9c8e0edf1..c31159ee4 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/interface.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/interface.go @@ -13,6 +13,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by informer-gen. DO NOT EDIT. + package tekton import ( diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/clustertask.go b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/clustertask.go index a18b80560..ef1a2b83a 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/clustertask.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/clustertask.go @@ -13,12 +13,15 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by informer-gen. DO NOT EDIT. 
+ package v1alpha1 import ( time "time" - pipeline_v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + pipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces" v1alpha1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1" @@ -66,7 +69,7 @@ func NewFilteredClusterTaskInformer(client versioned.Interface, resyncPeriod tim return client.TektonV1alpha1().ClusterTasks().Watch(options) }, }, - &pipeline_v1alpha1.ClusterTask{}, + &pipelinev1alpha1.ClusterTask{}, resyncPeriod, indexers, ) @@ -77,7 +80,7 @@ func (f *clusterTaskInformer) defaultInformer(client versioned.Interface, resync } func (f *clusterTaskInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&pipeline_v1alpha1.ClusterTask{}, f.defaultInformer) + return f.factory.InformerFor(&pipelinev1alpha1.ClusterTask{}, f.defaultInformer) } func (f *clusterTaskInformer) Lister() v1alpha1.ClusterTaskLister { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/condition.go b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/condition.go new file mode 100644 index 000000000..5ebf5400b --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/condition.go @@ -0,0 +1,89 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + time "time" + + pipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" + internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ConditionInformer provides access to a shared informer and lister for +// Conditions. +type ConditionInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.ConditionLister +} + +type conditionInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewConditionInformer constructs a new informer for Condition type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewConditionInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredConditionInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredConditionInformer constructs a new informer for Condition type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredConditionInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.TektonV1alpha1().Conditions(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.TektonV1alpha1().Conditions(namespace).Watch(options) + }, + }, + &pipelinev1alpha1.Condition{}, + resyncPeriod, + indexers, + ) +} + +func (f *conditionInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredConditionInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *conditionInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&pipelinev1alpha1.Condition{}, f.defaultInformer) +} + +func (f *conditionInformer) Lister() v1alpha1.ConditionLister { + return v1alpha1.NewConditionLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/interface.go b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/interface.go index e622462c7..8b0fe9879 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/interface.go +++ 
b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/interface.go @@ -13,6 +13,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by informer-gen. DO NOT EDIT. + package v1alpha1 import ( @@ -23,6 +26,8 @@ import ( type Interface interface { // ClusterTasks returns a ClusterTaskInformer. ClusterTasks() ClusterTaskInformer + // Conditions returns a ConditionInformer. + Conditions() ConditionInformer // Pipelines returns a PipelineInformer. Pipelines() PipelineInformer // PipelineResources returns a PipelineResourceInformer. @@ -51,6 +56,11 @@ func (v *version) ClusterTasks() ClusterTaskInformer { return &clusterTaskInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } +// Conditions returns a ConditionInformer. +func (v *version) Conditions() ConditionInformer { + return &conditionInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + // Pipelines returns a PipelineInformer. func (v *version) Pipelines() PipelineInformer { return &pipelineInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/pipeline.go b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/pipeline.go index 224fa2a09..4f385d1f1 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/pipeline.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/pipeline.go @@ -13,12 +13,15 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by informer-gen. 
DO NOT EDIT. + package v1alpha1 import ( time "time" - pipeline_v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + pipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces" v1alpha1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1" @@ -67,7 +70,7 @@ func NewFilteredPipelineInformer(client versioned.Interface, namespace string, r return client.TektonV1alpha1().Pipelines(namespace).Watch(options) }, }, - &pipeline_v1alpha1.Pipeline{}, + &pipelinev1alpha1.Pipeline{}, resyncPeriod, indexers, ) @@ -78,7 +81,7 @@ func (f *pipelineInformer) defaultInformer(client versioned.Interface, resyncPer } func (f *pipelineInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&pipeline_v1alpha1.Pipeline{}, f.defaultInformer) + return f.factory.InformerFor(&pipelinev1alpha1.Pipeline{}, f.defaultInformer) } func (f *pipelineInformer) Lister() v1alpha1.PipelineLister { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/pipelineresource.go b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/pipelineresource.go index 5068042c4..52f83148d 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/pipelineresource.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/pipelineresource.go @@ -13,12 +13,15 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by informer-gen. DO NOT EDIT. 
+ package v1alpha1 import ( time "time" - pipeline_v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + pipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces" v1alpha1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1" @@ -67,7 +70,7 @@ func NewFilteredPipelineResourceInformer(client versioned.Interface, namespace s return client.TektonV1alpha1().PipelineResources(namespace).Watch(options) }, }, - &pipeline_v1alpha1.PipelineResource{}, + &pipelinev1alpha1.PipelineResource{}, resyncPeriod, indexers, ) @@ -78,7 +81,7 @@ func (f *pipelineResourceInformer) defaultInformer(client versioned.Interface, r } func (f *pipelineResourceInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&pipeline_v1alpha1.PipelineResource{}, f.defaultInformer) + return f.factory.InformerFor(&pipelinev1alpha1.PipelineResource{}, f.defaultInformer) } func (f *pipelineResourceInformer) Lister() v1alpha1.PipelineResourceLister { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/pipelinerun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/pipelinerun.go index b993c45cf..562faada3 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/pipelinerun.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/pipelinerun.go @@ -13,12 +13,15 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by informer-gen. DO NOT EDIT. 
+ package v1alpha1 import ( time "time" - pipeline_v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + pipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces" v1alpha1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1" @@ -67,7 +70,7 @@ func NewFilteredPipelineRunInformer(client versioned.Interface, namespace string return client.TektonV1alpha1().PipelineRuns(namespace).Watch(options) }, }, - &pipeline_v1alpha1.PipelineRun{}, + &pipelinev1alpha1.PipelineRun{}, resyncPeriod, indexers, ) @@ -78,7 +81,7 @@ func (f *pipelineRunInformer) defaultInformer(client versioned.Interface, resync } func (f *pipelineRunInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&pipeline_v1alpha1.PipelineRun{}, f.defaultInformer) + return f.factory.InformerFor(&pipelinev1alpha1.PipelineRun{}, f.defaultInformer) } func (f *pipelineRunInformer) Lister() v1alpha1.PipelineRunLister { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/task.go b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/task.go index 8e4d94ccf..f1e5df45e 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/task.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/task.go @@ -13,12 +13,15 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by informer-gen. DO NOT EDIT. 
+ package v1alpha1 import ( time "time" - pipeline_v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + pipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces" v1alpha1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1" @@ -67,7 +70,7 @@ func NewFilteredTaskInformer(client versioned.Interface, namespace string, resyn return client.TektonV1alpha1().Tasks(namespace).Watch(options) }, }, - &pipeline_v1alpha1.Task{}, + &pipelinev1alpha1.Task{}, resyncPeriod, indexers, ) @@ -78,7 +81,7 @@ func (f *taskInformer) defaultInformer(client versioned.Interface, resyncPeriod } func (f *taskInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&pipeline_v1alpha1.Task{}, f.defaultInformer) + return f.factory.InformerFor(&pipelinev1alpha1.Task{}, f.defaultInformer) } func (f *taskInformer) Lister() v1alpha1.TaskLister { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/taskrun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/taskrun.go index 16b08c62f..67a77b4b9 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/taskrun.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/taskrun.go @@ -13,12 +13,15 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by informer-gen. DO NOT EDIT. 
+ package v1alpha1 import ( time "time" - pipeline_v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + pipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces" v1alpha1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1" @@ -67,7 +70,7 @@ func NewFilteredTaskRunInformer(client versioned.Interface, namespace string, re return client.TektonV1alpha1().TaskRuns(namespace).Watch(options) }, }, - &pipeline_v1alpha1.TaskRun{}, + &pipelinev1alpha1.TaskRun{}, resyncPeriod, indexers, ) @@ -78,7 +81,7 @@ func (f *taskRunInformer) defaultInformer(client versioned.Interface, resyncPeri } func (f *taskRunInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&pipeline_v1alpha1.TaskRun{}, f.defaultInformer) + return f.factory.InformerFor(&pipelinev1alpha1.TaskRun{}, f.defaultInformer) } func (f *taskRunInformer) Lister() v1alpha1.TaskRunLister { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/client/client.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/client/client.go new file mode 100644 index 000000000..40ae956fe --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/client/client.go @@ -0,0 +1,49 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package client + +import ( + "context" + + versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" + rest "k8s.io/client-go/rest" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +func init() { + injection.Default.RegisterClient(withClient) +} + +// Key is used as the key for associating information with a context.Context. +type Key struct{} + +func withClient(ctx context.Context, cfg *rest.Config) context.Context { + return context.WithValue(ctx, Key{}, versioned.NewForConfigOrDie(cfg)) +} + +// Get extracts the versioned.Interface client from the context. +func Get(ctx context.Context) versioned.Interface { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Fatalf( + "Unable to fetch %T from context.", (versioned.Interface)(nil)) + } + return untyped.(versioned.Interface) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/client/fake/fake.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/client/fake/fake.go new file mode 100644 index 000000000..c35be01b1 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/client/fake/fake.go @@ -0,0 +1,54 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + fake "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake" + client "github.com/tektoncd/pipeline/pkg/client/injection/client" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +func init() { + injection.Fake.RegisterClient(withClient) +} + +func withClient(ctx context.Context, cfg *rest.Config) context.Context { + ctx, _ = With(ctx) + return ctx +} + +func With(ctx context.Context, objects ...runtime.Object) (context.Context, *fake.Clientset) { + cs := fake.NewSimpleClientset(objects...) + return context.WithValue(ctx, client.Key{}, cs), cs +} + +// Get extracts the Kubernetes client from the context. +func Get(ctx context.Context) *fake.Clientset { + untyped := ctx.Value(client.Key{}) + if untyped == nil { + logging.FromContext(ctx).Fatalf( + "Unable to fetch %T from context.", (*fake.Clientset)(nil)) + } + return untyped.(*fake.Clientset) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/factory/fake/fake.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/factory/fake/fake.go new file mode 100644 index 000000000..3ac8f4078 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/factory/fake/fake.go @@ -0,0 +1,41 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + externalversions "github.com/tektoncd/pipeline/pkg/client/informers/externalversions" + fake "github.com/tektoncd/pipeline/pkg/client/injection/client/fake" + factory "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/factory" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" +) + +var Get = factory.Get + +func init() { + injection.Fake.RegisterInformerFactory(withInformerFactory) +} + +func withInformerFactory(ctx context.Context) context.Context { + c := fake.Get(ctx) + return context.WithValue(ctx, factory.Key{}, + externalversions.NewSharedInformerFactory(c, controller.GetResyncPeriod(ctx))) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/factory/pipelinefactory.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/factory/pipelinefactory.go new file mode 100644 index 000000000..755b0df2b --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/factory/pipelinefactory.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package pipelinefactory + +import ( + "context" + + externalversions "github.com/tektoncd/pipeline/pkg/client/informers/externalversions" + client "github.com/tektoncd/pipeline/pkg/client/injection/client" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +func init() { + injection.Default.RegisterInformerFactory(withInformerFactory) +} + +// Key is used as the key for associating information with a context.Context. +type Key struct{} + +func withInformerFactory(ctx context.Context) context.Context { + c := client.Get(ctx) + return context.WithValue(ctx, Key{}, + externalversions.NewSharedInformerFactory(c, controller.GetResyncPeriod(ctx))) +} + +// Get extracts the InformerFactory from the context. +func Get(ctx context.Context) externalversions.SharedInformerFactory { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Fatalf( + "Unable to fetch %T from context.", (externalversions.SharedInformerFactory)(nil)) + } + return untyped.(externalversions.SharedInformerFactory) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/clustertask/clustertask.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/clustertask/clustertask.go new file mode 100644 index 000000000..e8dbe64cc --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/clustertask/clustertask.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package clustertask + +import ( + "context" + + v1alpha1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1" + factory "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/factory" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Tekton().V1alpha1().ClusterTasks() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. 
+func Get(ctx context.Context) v1alpha1.ClusterTaskInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Fatalf( + "Unable to fetch %T from context.", (v1alpha1.ClusterTaskInformer)(nil)) + } + return untyped.(v1alpha1.ClusterTaskInformer) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/clustertask/fake/fake.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/clustertask/fake/fake.go new file mode 100644 index 000000000..c22974be8 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/clustertask/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + fake "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/factory/fake" + clustertask "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/clustertask" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" +) + +var Get = clustertask.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Tekton().V1alpha1().ClusterTasks() + return context.WithValue(ctx, clustertask.Key{}, inf), inf.Informer() +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/condition/condition.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/condition/condition.go new file mode 100644 index 000000000..8dfe93f57 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/condition/condition.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package condition + +import ( + "context" + + v1alpha1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1" + factory "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/factory" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Tekton().V1alpha1().Conditions() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1alpha1.ConditionInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Fatalf( + "Unable to fetch %T from context.", (v1alpha1.ConditionInformer)(nil)) + } + return untyped.(v1alpha1.ConditionInformer) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/condition/fake/fake.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/condition/fake/fake.go new file mode 100644 index 000000000..01b3e2d2e --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/condition/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + fake "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/factory/fake" + condition "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/condition" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" +) + +var Get = condition.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Tekton().V1alpha1().Conditions() + return context.WithValue(ctx, condition.Key{}, inf), inf.Informer() +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipeline/fake/fake.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipeline/fake/fake.go new file mode 100644 index 000000000..62c70f11e --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipeline/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + fake "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/factory/fake" + pipeline "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipeline" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" +) + +var Get = pipeline.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Tekton().V1alpha1().Pipelines() + return context.WithValue(ctx, pipeline.Key{}, inf), inf.Informer() +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipeline/pipeline.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipeline/pipeline.go new file mode 100644 index 000000000..9c7aaee3e --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipeline/pipeline.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package pipeline + +import ( + "context" + + v1alpha1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1" + factory "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/factory" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Tekton().V1alpha1().Pipelines() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1alpha1.PipelineInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Fatalf( + "Unable to fetch %T from context.", (v1alpha1.PipelineInformer)(nil)) + } + return untyped.(v1alpha1.PipelineInformer) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipelineresource/fake/fake.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipelineresource/fake/fake.go new file mode 100644 index 000000000..c346d980b --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipelineresource/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + fake "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/factory/fake" + pipelineresource "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipelineresource" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" +) + +var Get = pipelineresource.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Tekton().V1alpha1().PipelineResources() + return context.WithValue(ctx, pipelineresource.Key{}, inf), inf.Informer() +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipelineresource/pipelineresource.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipelineresource/pipelineresource.go new file mode 100644 index 000000000..675974f7a --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipelineresource/pipelineresource.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package pipelineresource + +import ( + "context" + + v1alpha1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1" + factory "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/factory" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Tekton().V1alpha1().PipelineResources() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1alpha1.PipelineResourceInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Fatalf( + "Unable to fetch %T from context.", (v1alpha1.PipelineResourceInformer)(nil)) + } + return untyped.(v1alpha1.PipelineResourceInformer) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipelinerun/fake/fake.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipelinerun/fake/fake.go new file mode 100644 index 000000000..58942f78c --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipelinerun/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + fake "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/factory/fake" + pipelinerun "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipelinerun" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" +) + +var Get = pipelinerun.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Tekton().V1alpha1().PipelineRuns() + return context.WithValue(ctx, pipelinerun.Key{}, inf), inf.Informer() +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipelinerun/pipelinerun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipelinerun/pipelinerun.go new file mode 100644 index 000000000..ef65c365c --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipelinerun/pipelinerun.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package pipelinerun + +import ( + "context" + + v1alpha1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1" + factory "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/factory" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Tekton().V1alpha1().PipelineRuns() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1alpha1.PipelineRunInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Fatalf( + "Unable to fetch %T from context.", (v1alpha1.PipelineRunInformer)(nil)) + } + return untyped.(v1alpha1.PipelineRunInformer) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/task/fake/fake.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/task/fake/fake.go new file mode 100644 index 000000000..d92a457fd --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/task/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + fake "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/factory/fake" + task "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/task" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" +) + +var Get = task.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Tekton().V1alpha1().Tasks() + return context.WithValue(ctx, task.Key{}, inf), inf.Informer() +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/task/task.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/task/task.go new file mode 100644 index 000000000..d6f379587 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/task/task.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package task + +import ( + "context" + + v1alpha1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1" + factory "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/factory" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Tekton().V1alpha1().Tasks() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1alpha1.TaskInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Fatalf( + "Unable to fetch %T from context.", (v1alpha1.TaskInformer)(nil)) + } + return untyped.(v1alpha1.TaskInformer) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/taskrun/fake/fake.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/taskrun/fake/fake.go new file mode 100644 index 000000000..f3d547e7f --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/taskrun/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + fake "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/factory/fake" + taskrun "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/taskrun" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" +) + +var Get = taskrun.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Tekton().V1alpha1().TaskRuns() + return context.WithValue(ctx, taskrun.Key{}, inf), inf.Informer() +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/taskrun/taskrun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/taskrun/taskrun.go new file mode 100644 index 000000000..2618c0cb4 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/taskrun/taskrun.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package taskrun + +import ( + "context" + + v1alpha1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1" + factory "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/factory" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Tekton().V1alpha1().TaskRuns() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1alpha1.TaskRunInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Fatalf( + "Unable to fetch %T from context.", (v1alpha1.TaskRunInformer)(nil)) + } + return untyped.(v1alpha1.TaskRunInformer) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/clustertask.go b/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/clustertask.go index 6b1133ca0..7b38e7584 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/clustertask.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/clustertask.go @@ -13,6 +13,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by lister-gen. DO NOT EDIT. 
+ package v1alpha1 import ( diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/condition.go b/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/condition.go new file mode 100644 index 000000000..d3cf68be7 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/condition.go @@ -0,0 +1,94 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ConditionLister helps list Conditions. +type ConditionLister interface { + // List lists all Conditions in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.Condition, err error) + // Conditions returns an object that can list and get Conditions. + Conditions(namespace string) ConditionNamespaceLister + ConditionListerExpansion +} + +// conditionLister implements the ConditionLister interface. +type conditionLister struct { + indexer cache.Indexer +} + +// NewConditionLister returns a new ConditionLister. +func NewConditionLister(indexer cache.Indexer) ConditionLister { + return &conditionLister{indexer: indexer} +} + +// List lists all Conditions in the indexer. 
+func (s *conditionLister) List(selector labels.Selector) (ret []*v1alpha1.Condition, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.Condition)) + }) + return ret, err +} + +// Conditions returns an object that can list and get Conditions. +func (s *conditionLister) Conditions(namespace string) ConditionNamespaceLister { + return conditionNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ConditionNamespaceLister helps list and get Conditions. +type ConditionNamespaceLister interface { + // List lists all Conditions in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha1.Condition, err error) + // Get retrieves the Condition from the indexer for a given namespace and name. + Get(name string) (*v1alpha1.Condition, error) + ConditionNamespaceListerExpansion +} + +// conditionNamespaceLister implements the ConditionNamespaceLister +// interface. +type conditionNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Conditions in the indexer for a given namespace. +func (s conditionNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Condition, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.Condition)) + }) + return ret, err +} + +// Get retrieves the Condition from the indexer for a given namespace and name. 
+func (s conditionNamespaceLister) Get(name string) (*v1alpha1.Condition, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("condition"), name) + } + return obj.(*v1alpha1.Condition), nil +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/expansion_generated.go b/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/expansion_generated.go index 6a6baadb9..5a2ff99fe 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/expansion_generated.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/expansion_generated.go @@ -13,12 +13,23 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by lister-gen. DO NOT EDIT. + package v1alpha1 // ClusterTaskListerExpansion allows custom methods to be added to // ClusterTaskLister. type ClusterTaskListerExpansion interface{} +// ConditionListerExpansion allows custom methods to be added to +// ConditionLister. +type ConditionListerExpansion interface{} + +// ConditionNamespaceListerExpansion allows custom methods to be added to +// ConditionNamespaceLister. +type ConditionNamespaceListerExpansion interface{} + // PipelineListerExpansion allows custom methods to be added to // PipelineLister. 
type PipelineListerExpansion interface{} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/pipeline.go b/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/pipeline.go index ef5416760..8bee545ad 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/pipeline.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/pipeline.go @@ -13,6 +13,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by lister-gen. DO NOT EDIT. + package v1alpha1 import ( diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/pipelineresource.go b/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/pipelineresource.go index 30d721b10..1e9d332c1 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/pipelineresource.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/pipelineresource.go @@ -13,6 +13,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by lister-gen. DO NOT EDIT. + package v1alpha1 import ( diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/pipelinerun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/pipelinerun.go index 35d7cfa70..3799c57f3 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/pipelinerun.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/pipelinerun.go @@ -13,6 +13,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by lister-gen. DO NOT EDIT. + package v1alpha1 import ( diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/task.go b/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/task.go index 4b04aad4b..3e11a0ba7 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/task.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/task.go @@ -13,6 +13,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by lister-gen. DO NOT EDIT. + package v1alpha1 import ( diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/taskrun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/taskrun.go index e5ea1629d..458ff7099 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/taskrun.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/taskrun.go @@ -13,6 +13,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Code generated by lister-gen. DO NOT EDIT. 
+ package v1alpha1 import ( diff --git a/vendor/github.com/tektoncd/pipeline/pkg/list/diff.go b/vendor/github.com/tektoncd/pipeline/pkg/list/diff.go index 1f139c36f..049fd91c3 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/list/diff.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/list/diff.go @@ -9,7 +9,7 @@ You may obtain a copy of the License at Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either extress or implied. +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ diff --git a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/pipelinerun/resources/apply.go b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/pipelinerun/resources/apply.go index 0717c8eb2..389d0fa72 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/pipelinerun/resources/apply.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/pipelinerun/resources/apply.go @@ -1,14 +1,17 @@ /* - Copyright 2019 Knative Authors LLC - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ package resources @@ -17,42 +20,59 @@ import ( "fmt" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - "github.com/tektoncd/pipeline/pkg/templating" ) // ApplyParameters applies the params from a PipelineRun.Params to a PipelineSpec. func ApplyParameters(p *v1alpha1.Pipeline, pr *v1alpha1.PipelineRun) *v1alpha1.Pipeline { // This assumes that the PipelineRun inputs have been validated against what the Pipeline requests. - replacements := map[string]string{} - // Set all the default replacements + + // stringReplacements is used for standard single-string stringReplacements, while arrayReplacements contains arrays + // that need to be further processed. 
+ stringReplacements := map[string]string{} + arrayReplacements := map[string][]string{} + + // Set all the default stringReplacements for _, p := range p.Spec.Params { - if p.Default != "" { - replacements[fmt.Sprintf("params.%s", p.Name)] = p.Default + if p.Default != nil { + if p.Default.Type == v1alpha1.ParamTypeString { + stringReplacements[fmt.Sprintf("params.%s", p.Name)] = p.Default.StringVal + } else { + arrayReplacements[fmt.Sprintf("params.%s", p.Name)] = p.Default.ArrayVal + } } } // Set and overwrite params with the ones from the PipelineRun for _, p := range pr.Spec.Params { - replacements[fmt.Sprintf("params.%s", p.Name)] = p.Value + if p.Value.Type == v1alpha1.ParamTypeString { + stringReplacements[fmt.Sprintf("params.%s", p.Name)] = p.Value.StringVal + } else { + arrayReplacements[fmt.Sprintf("params.%s", p.Name)] = p.Value.ArrayVal + } } - return ApplyReplacements(p, replacements) + return ApplyReplacements(p, stringReplacements, arrayReplacements) } // ApplyReplacements replaces placeholders for declared parameters with the specified replacements. 
-func ApplyReplacements(p *v1alpha1.Pipeline, replacements map[string]string) *v1alpha1.Pipeline { +func ApplyReplacements(p *v1alpha1.Pipeline, replacements map[string]string, arrayReplacements map[string][]string) *v1alpha1.Pipeline { p = p.DeepCopy() tasks := p.Spec.Tasks for i := range tasks { - params := tasks[i].Params - - for j := range params { - params[j].Value = templating.ApplyReplacements(params[j].Value, replacements) + tasks[i].Params = replaceParamValues(tasks[i].Params, replacements, arrayReplacements) + for j := range tasks[i].Conditions { + c := tasks[i].Conditions[j] + c.Params = replaceParamValues(c.Params, replacements, arrayReplacements) } - - tasks[i].Params = params } return p } + +func replaceParamValues(params []v1alpha1.Param, stringReplacements map[string]string, arrayReplacements map[string][]string) []v1alpha1.Param { + for i := range params { + params[i].Value.ApplyReplacements(stringReplacements, arrayReplacements) + } + return params +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/pipelinerun/resources/conditionresolution.go b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/pipelinerun/resources/conditionresolution.go new file mode 100644 index 000000000..3eb25b3da --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/pipelinerun/resources/conditionresolution.go @@ -0,0 +1,134 @@ +/* + * + * Copyright 2019 The Tekton Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package resources + +import ( + "fmt" + + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + corev1 "k8s.io/api/core/v1" +) + +const ( + // unnamedCheckNamePrefix is the prefix added to the name of a condition's + // spec.Check.Image if the name is missing + unnamedCheckNamePrefix = "condition-check-" +) + +// GetCondition is a function used to retrieve PipelineConditions. +type GetCondition func(string) (*v1alpha1.Condition, error) + +// ResolvedConditionCheck contains a Condition and its associated ConditionCheck, if it +// exists. ConditionCheck can be nil to represent there being no ConditionCheck (i.e the condition +// has not been evaluated). +type ResolvedConditionCheck struct { + ConditionCheckName string + Condition *v1alpha1.Condition + ConditionCheck *v1alpha1.ConditionCheck + PipelineTaskCondition *v1alpha1.PipelineTaskCondition +} + +// TaskConditionCheckState is a slice of ResolvedConditionCheck the represents the current execution +// state of Conditions for a Task in a pipeline run. 
+type TaskConditionCheckState []*ResolvedConditionCheck + +// HasStarted returns true if the conditionChecks for a given object have been created +func (state TaskConditionCheckState) HasStarted() bool { + hasStarted := true + for _, j := range state { + if j.ConditionCheck == nil { + hasStarted = false + } + } + return hasStarted +} + +// IsComplete returns true if the status for all conditionChecks for a task indicate that they are done +func (state TaskConditionCheckState) IsDone() bool { + if !state.HasStarted() { + return false + } + isDone := true + for _, rcc := range state { + isDone = isDone && rcc.ConditionCheck.IsDone() + } + return isDone +} + +// IsComplete returns true if the status for all conditionChecks for a task indicate they have +// completed successfully +func (state TaskConditionCheckState) IsSuccess() bool { + if !state.IsDone() { + return false + } + isSuccess := true + for _, rcc := range state { + isSuccess = isSuccess && rcc.ConditionCheck.IsSuccessful() + } + return isSuccess +} + +// ConditionToTaskSpec creates a TaskSpec from a given Condition +func (rcc *ResolvedConditionCheck) ConditionToTaskSpec() *v1alpha1.TaskSpec { + if rcc.Condition.Spec.Check.Name == "" { + rcc.Condition.Spec.Check.Name = unnamedCheckNamePrefix + rcc.Condition.Name + } + + t := &v1alpha1.TaskSpec{ + Steps: []v1alpha1.Step{{Container: rcc.Condition.Spec.Check}}, + } + + if len(rcc.Condition.Spec.Params) > 0 { + t.Inputs = &v1alpha1.Inputs{ + Params: rcc.Condition.Spec.Params, + } + // convert param strings of type ${params.x} to ${inputs.params.x} + // in order to apply taskrun substitution + convertParamTemplates(&t.Steps[0], rcc.Condition.Spec.Params) + } + return t +} + +// Replaces all instances of ${params.x} in the container to ${inputs.params.x} for each param name +func convertParamTemplates(step *v1alpha1.Step, params []v1alpha1.ParamSpec) { + replacements := make(map[string]string) + for _, p := range params { + replacements[fmt.Sprintf("params.%s", 
p.Name)] = fmt.Sprintf("${inputs.params.%s}", p.Name) + v1alpha1.ApplyStepReplacements(step, replacements, map[string][]string{}) + } +} + +// NewConditionCheck status creates a ConditionCheckStatus from a ConditionCheck +func (rcc *ResolvedConditionCheck) NewConditionCheckStatus() *v1alpha1.ConditionCheckStatus { + var checkStep corev1.ContainerState + trs := rcc.ConditionCheck.Status + for _, s := range trs.Steps { + if s.Name == rcc.Condition.Spec.Check.Name { + checkStep = s.ContainerState + break + } + } + + return &v1alpha1.ConditionCheckStatus{ + Status: trs.Status, + PodName: trs.PodName, + StartTime: trs.StartTime, + CompletionTime: trs.CompletionTime, + Check: checkStep, + } +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/pipelinerun/resources/pipelinerunresolution.go b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/pipelinerun/resources/pipelinerunresolution.go index 74dc29271..b6db8fb92 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/pipelinerun/resources/pipelinerunresolution.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/pipelinerun/resources/pipelinerunresolution.go @@ -18,18 +18,17 @@ package resources import ( "fmt" - "time" + "reflect" - "github.com/knative/pkg/apis" - "github.com/tektoncd/pipeline/pkg/names" "go.uber.org/zap" "golang.org/x/xerrors" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" "github.com/tektoncd/pipeline/pkg/list" + "github.com/tektoncd/pipeline/pkg/names" "github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/resources" ) @@ -48,6 +47,10 @@ const ( // ReasonTimedOut indicates that the PipelineRun has taken longer than its configured // timeout ReasonTimedOut = "PipelineRunTimeout" + + // ReasonConditionCheckFailed indicates that the reason for the failure status is that the + // 
condition check associated to the pipeline task evaluated to false + ReasonConditionCheckFailed = "ConditionCheckFailed" ) // ResolvedPipelineRunTask contains a Task and its associated TaskRun, if it @@ -57,6 +60,8 @@ type ResolvedPipelineRunTask struct { TaskRun *v1alpha1.TaskRun PipelineTask *v1alpha1.PipelineTask ResolvedTaskResources *resources.ResolvedTaskResources + // ConditionChecks ~~TaskRuns but for evaling conditions + ResolvedConditionChecks TaskConditionCheckState // Could also be a TaskRun or maybe just a Pod? } // PipelineRunState is a slice of ResolvedPipelineRunTasks the represents the current execution @@ -75,6 +80,41 @@ func (t ResolvedPipelineRunTask) IsDone() (isDone bool) { return } +// IsSuccessful returns true only if the taskrun itself has completed successfully +func (t ResolvedPipelineRunTask) IsSuccessful() bool { + if t.TaskRun == nil { + return false + } + c := t.TaskRun.Status.GetCondition(apis.ConditionSucceeded) + if c == nil { + return false + } + + if c.Status == corev1.ConditionTrue { + return true + } + return false +} + +// IsFailed returns true only if the taskrun itself has failed +func (t ResolvedPipelineRunTask) IsFailure() bool { + if t.TaskRun == nil { + return false + } + c := t.TaskRun.Status.GetCondition(apis.ConditionSucceeded) + retriesDone := len(t.TaskRun.Status.RetriesStatus) + retries := t.PipelineTask.Retries + return c.IsFalse() && retriesDone >= retries +} + +func (state PipelineRunState) toMap() map[string]*ResolvedPipelineRunTask { + m := make(map[string]*ResolvedPipelineRunTask) + for _, rprt := range state { + m[rprt.PipelineTask.Name] = rprt + } + return m +} + func (state PipelineRunState) IsDone() (isDone bool) { isDone = true for _, t := range state { @@ -100,7 +140,7 @@ func (state PipelineRunState) GetNextTasks(candidateTasks map[string]v1alpha1.Pi if _, ok := candidateTasks[t.PipelineTask.Name]; ok && t.TaskRun != nil { status := t.TaskRun.Status.GetCondition(apis.ConditionSucceeded) if status != 
nil && status.IsFalse() { - if !(t.TaskRun.IsCancelled() || status.Reason == "TaskRunCancelled") { + if !(t.TaskRun.IsCancelled() || status.Reason == v1alpha1.TaskRunSpecStatusCancelled || status.Reason == ReasonConditionCheckFailed) { if len(t.TaskRun.Status.RetriesStatus) < t.PipelineTask.Retries { tasks = append(tasks, t) } @@ -200,6 +240,15 @@ func (e *ResourceNotFoundError) Error() string { return fmt.Sprintf("Couldn't retrieve PipelineResource: %s", e.Msg) } +type ConditionNotFoundError struct { + Name string + Msg string +} + +func (e *ConditionNotFoundError) Error() string { + return fmt.Sprintf("Couldn't retrieve Condition %q: %s", e.Name, e.Msg) +} + // ResolvePipelineRun retrieves all Tasks instances which are reference by tasks, getting // instances from getTask. If it is unable to retrieve an instance of a referenced Task, it // will return an error, otherwise it returns a list of all of the Tasks retrieved. @@ -211,6 +260,7 @@ func ResolvePipelineRun( getTaskRun resources.GetTaskRun, getClusterTask resources.GetClusterTask, getResource resources.GetResource, + getCondition GetCondition, tasks []v1alpha1.PipelineTask, providedResources map[string]v1alpha1.PipelineResourceRef, ) (PipelineRunState, error) { @@ -224,7 +274,7 @@ func ResolvePipelineRun( TaskRunName: getTaskRunName(pipelineRun.Status.TaskRuns, pt.Name, pipelineRun.Name), } - // Find the Task that this task in the Pipeline this PipelineTask is using + // Find the Task that this PipelineTask is using var t v1alpha1.TaskInterface var err error if pt.TaskRef.Kind == v1alpha1.ClusterTaskKind { @@ -261,12 +311,36 @@ func ResolvePipelineRun( if taskRun != nil { rprt.TaskRun = taskRun } + + // Get all conditions that this pipelineTask will be using, if any + if len(pt.Conditions) > 0 { + rcc, err := resolveConditionChecks(&pt, pipelineRun.Status.TaskRuns, rprt.TaskRunName, getTaskRun, getCondition) + if err != nil { + return nil, err + } + rprt.ResolvedConditionChecks = rcc + } + // Add this task 
to the state of the PipelineRun state = append(state, &rprt) } return state, nil } +// getConditionCheckName should return a unique name for a `ConditionCheck` if one has not already been defined, and the existing one otherwise. +func getConditionCheckName(taskRunStatus map[string]*v1alpha1.PipelineRunTaskRunStatus, trName, conditionName string) string { + trStatus, ok := taskRunStatus[trName] + if ok && trStatus.ConditionChecks != nil { + for k, v := range trStatus.ConditionChecks { + // TODO(1022): Should we allow multiple conditions of the same type? + if conditionName == v.ConditionName { + return k + } + } + } + return names.SimpleNameGenerator.RestrictLengthWithRandomSuffix(fmt.Sprintf("%s-%s", trName, conditionName)) +} + // getTaskRunName should return a unique name for a `TaskRun` if one has not already been defined, and the existing one otherwise. func getTaskRunName(taskRunsStatus map[string]*v1alpha1.PipelineRunTaskRunStatus, ptName, prName string) string { for k, v := range taskRunsStatus { @@ -280,40 +354,26 @@ func getTaskRunName(taskRunsStatus map[string]*v1alpha1.PipelineRunTaskRunStatus // GetPipelineConditionStatus will return the Condition that the PipelineRun prName should be // updated with, based on the status of the TaskRuns in state. 
-func GetPipelineConditionStatus(prName string, state PipelineRunState, logger *zap.SugaredLogger, startTime *metav1.Time, - pipelineTimeout *metav1.Duration) *apis.Condition { - allFinished := true - if !startTime.IsZero() && pipelineTimeout != nil { - timeout := pipelineTimeout.Duration - runtime := time.Since(startTime.Time) - if runtime > timeout { - logger.Infof("PipelineRun %q has timed out(runtime %s over %s)", prName, runtime, timeout) - - timeoutMsg := fmt.Sprintf("PipelineRun %q failed to finish within %q", prName, timeout.String()) - return &apis.Condition{ - Type: apis.ConditionSucceeded, - Status: corev1.ConditionFalse, - Reason: ReasonTimedOut, - Message: timeoutMsg, - } +func GetPipelineConditionStatus(pr *v1alpha1.PipelineRun, state PipelineRunState, logger *zap.SugaredLogger, dag *v1alpha1.DAG) *apis.Condition { + // We have 4 different states here: + // 1. Timed out -> Failed + // 2. Any one TaskRun has failed - >Failed. This should change with #1020 and #1023 + // 3. All tasks are done or are skipped (i.e. condition check failed).-> Success + // 4. 
A Task or Condition is running right now or there are things left to run -> Running + + if pr.IsTimedOut() { + return &apis.Condition{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionFalse, + Reason: ReasonTimedOut, + Message: fmt.Sprintf("PipelineRun %q failed to finish within %q", pr.Name, pr.Spec.Timeout.String()), } } + + // A single failed task means we fail the pipeline for _, rprt := range state { - if rprt.TaskRun == nil { - logger.Infof("TaskRun %s doesn't have a Status, so PipelineRun %s isn't finished", rprt.TaskRunName, prName) - allFinished = false - continue - } - c := rprt.TaskRun.Status.GetCondition(apis.ConditionSucceeded) - if c == nil { - logger.Infof("TaskRun %s doesn't have a condition, so PipelineRun %s isn't finished", rprt.TaskRunName, prName) - allFinished = false - continue - } - logger.Infof("TaskRun %s status : %v", rprt.TaskRunName, c.Status) - // If any TaskRuns have failed, we should halt execution and consider the run failed - if c.Status == corev1.ConditionFalse && rprt.IsDone() { - logger.Infof("TaskRun %s has failed, so PipelineRun %s has failed, retries done: %b", rprt.TaskRunName, prName, len(rprt.TaskRun.Status.RetriesStatus)) + if rprt.IsFailure() { // IsDone ensures we have crossed the retry limit + logger.Infof("TaskRun %s has failed, so PipelineRun %s has failed, retries done: %b", rprt.TaskRunName, pr.Name, len(rprt.TaskRun.Status.RetriesStatus)) return &apis.Condition{ Type: apis.ConditionSucceeded, Status: corev1.ConditionFalse, @@ -321,27 +381,66 @@ func GetPipelineConditionStatus(prName string, state PipelineRunState, logger *z Message: fmt.Sprintf("TaskRun %s has failed", rprt.TaskRun.Name), } } - if c.Status != corev1.ConditionTrue { - logger.Infof("TaskRun %s is still running so PipelineRun %s is still running", rprt.TaskRunName, prName) - allFinished = false + } + + allTasks := []string{} + successOrSkipTasks := []string{} + + // Check to see if all tasks are success or skipped + for _, rprt := range state
{ + allTasks = append(allTasks, rprt.PipelineTask.Name) + if rprt.IsSuccessful() || isSkipped(rprt, state.toMap(), dag) { + successOrSkipTasks = append(successOrSkipTasks, rprt.PipelineTask.Name) } } - if !allFinished { - logger.Infof("PipelineRun %s still has running TaskRuns so it isn't yet done", prName) + + if reflect.DeepEqual(allTasks, successOrSkipTasks) { + logger.Infof("All TaskRuns have finished for PipelineRun %s so it has finished", pr.Name) return &apis.Condition{ Type: apis.ConditionSucceeded, - Status: corev1.ConditionUnknown, - Reason: ReasonRunning, - Message: "Not all Tasks in the Pipeline have finished executing", + Status: corev1.ConditionTrue, + Reason: ReasonSucceeded, + Message: "All Tasks have completed executing", } } - logger.Infof("All TaskRuns have finished for PipelineRun %s so it has finished", prName) + + // Hasn't timed out; no taskrun failed yet; and not all tasks have finished.... + // Must keep running then.... return &apis.Condition{ Type: apis.ConditionSucceeded, - Status: corev1.ConditionTrue, - Reason: ReasonSucceeded, - Message: "All Tasks have completed executing", + Status: corev1.ConditionUnknown, + Reason: ReasonRunning, + Message: "Not all Tasks in the Pipeline have finished executing", + } +} + +// isSkipped returns true if a Task in a TaskRun will not be run either because +// its Condition Checks failed or because one of the parent tasks' conditions failed +// Note that this means isSkipped returns false if a conditionCheck is in progress +func isSkipped(rprt *ResolvedPipelineRunTask, stateMap map[string]*ResolvedPipelineRunTask, d *v1alpha1.DAG) bool { + // Taskrun not skipped if it already exists + if rprt.TaskRun != nil { + return false + } + + // Check if conditionChecks have failed, if so task is skipped + if len(rprt.ResolvedConditionChecks) > 0 { + // isSkipped is only true if + if rprt.ResolvedConditionChecks.IsDone() && !rprt.ResolvedConditionChecks.IsSuccess() { + return true + } + } + + // Recursively
look at parent tasks to see if they have been skipped, + // if any of the parents have been skipped, skip as well + node := d.Nodes[rprt.PipelineTask.Name] + for _, p := range node.Prev { + skip := isSkipped(stateMap[p.Task.Name], stateMap, d) + if skip { + return true + } } + return false } func findReferencedTask(pb string, state []*ResolvedPipelineRunTask) *ResolvedPipelineRunTask { @@ -389,3 +488,34 @@ func ValidateFrom(state PipelineRunState) error { return nil } + +func resolveConditionChecks(pt *v1alpha1.PipelineTask, + taskRunStatus map[string]*v1alpha1.PipelineRunTaskRunStatus, + taskRunName string, getTaskRun resources.GetTaskRun, getCondition GetCondition) ([]*ResolvedConditionCheck, error) { + rcc := []*ResolvedConditionCheck{} + for _, ptc := range pt.Conditions { + cName := ptc.ConditionRef + c, err := getCondition(cName) + if err != nil { + return nil, &ConditionNotFoundError{ + Name: cName, + Msg: err.Error(), + } + } + conditionCheckName := getConditionCheckName(taskRunStatus, taskRunName, cName) + cctr, err := getTaskRun(conditionCheckName) + if err != nil { + if !errors.IsNotFound(err) { + return nil, xerrors.Errorf("error retrieving ConditionCheck %s for taskRun name %s : %w", conditionCheckName, taskRunName, err) + } + } + + rcc = append(rcc, &ResolvedConditionCheck{ + Condition: c, + ConditionCheckName: conditionCheckName, + ConditionCheck: v1alpha1.NewConditionCheck(cctr), + PipelineTaskCondition: &ptc, + }) + } + return rcc, nil +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/pipelinerun/resources/validate_params.go b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/pipelinerun/resources/validate_params.go new file mode 100644 index 000000000..e50ada40a --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/pipelinerun/resources/validate_params.go @@ -0,0 +1,47 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may 
not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + "golang.org/x/xerrors" +) + +// Validate that parameters in PipelineRun override corresponding parameters in Pipeline of the same type. +func ValidateParamTypesMatching(p *v1alpha1.Pipeline, pr *v1alpha1.PipelineRun) error { + // Build a map of parameter names/types declared in p. + paramTypes := make(map[string]v1alpha1.ParamType) + for _, param := range p.Spec.Params { + paramTypes[param.Name] = param.Type + } + + // Build a list of parameter names from pr that have mismatching types with the map created above. + var wrongTypeParamNames []string + for _, param := range pr.Spec.Params { + if paramType, ok := paramTypes[param.Name]; ok { + if param.Value.Type != paramType { + wrongTypeParamNames = append(wrongTypeParamNames, param.Name) + } + } + } + + // Return an error with the misconfigured parameters' names, or return nil if there are none. 
+ if len(wrongTypeParamNames) != 0 { + return xerrors.Errorf("parameters have inconsistent types : %s", wrongTypeParamNames) + } + return nil +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/entrypoint/entrypoint.go b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/entrypoint/entrypoint.go index a57a511d0..b83013e9b 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/entrypoint/entrypoint.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/entrypoint/entrypoint.go @@ -105,15 +105,14 @@ func AddCopyStep(spec *v1alpha1.TaskSpec) { Args: []string{"-c", fmt.Sprintf("cp /ko-app/entrypoint %s", BinaryLocation)}, VolumeMounts: []corev1.VolumeMount{toolsMount}, } - spec.Steps = append([]corev1.Container{cp}, spec.Steps...) - + spec.Steps = append([]v1alpha1.Step{{Container: cp}}, spec.Steps...) } // RedirectSteps will modify each of the steps/containers such that // the binary being run is no longer the one specified by the Command // and the Args, but is instead the entrypoint binary, which will // itself invoke the Command and Args, but also capture logs. -func RedirectSteps(cache *Cache, steps []corev1.Container, kubeclient kubernetes.Interface, taskRun *v1alpha1.TaskRun, logger *zap.SugaredLogger) error { +func RedirectSteps(cache *Cache, steps []v1alpha1.Step, kubeclient kubernetes.Interface, taskRun *v1alpha1.TaskRun, logger *zap.SugaredLogger) error { for i := range steps { step := &steps[i] if err := RedirectStep(cache, i, step, kubeclient, taskRun, logger); err != nil { @@ -128,7 +127,7 @@ func RedirectSteps(cache *Cache, steps []corev1.Container, kubeclient kubernetes // the binary being run is no longer the one specified by the Command // and the Args, but is instead the entrypoint binary, which will // itself invoke the Command and Args, but also capture logs. 
-func RedirectStep(cache *Cache, stepNum int, step *corev1.Container, kubeclient kubernetes.Interface, taskRun *v1alpha1.TaskRun, logger *zap.SugaredLogger) error { +func RedirectStep(cache *Cache, stepNum int, step *v1alpha1.Step, kubeclient kubernetes.Interface, taskRun *v1alpha1.TaskRun, logger *zap.SugaredLogger) error { if len(step.Command) == 0 { logger.Infof("Getting Cmd from remote entrypoint for step: %s", step.Name) var err error @@ -187,11 +186,34 @@ func getWaitFile(stepNum int) string { // GetRemoteEntrypoint accepts a cache of digest lookups, as well as the digest // to look for. If the cache does not contain the digest, it will lookup the // metadata from the images registry, and then commit that to the cache -func GetRemoteEntrypoint(cache *Cache, digest string, kubeclient kubernetes.Interface, taskRun *v1alpha1.TaskRun) ([]string, error) { +func GetRemoteEntrypoint(cache *Cache, image string, kubeclient kubernetes.Interface, taskRun *v1alpha1.TaskRun) ([]string, error) { + ref, err := name.ParseReference(image, name.WeakValidation) + if err != nil { + return nil, xerrors.Errorf("Failed to parse image %s: %w", image, err) + } + + var digest string + // If the image is specified as a digest, we can just take the digest from the name and use that in our cache. + // Otherwise we first have to resolve the tag to a digest. 
+ if d, ok := ref.(name.Digest); ok { + digest = d.String() + } else { + img, err := getRemoteImage(image, kubeclient, taskRun) + if err != nil { + return nil, xerrors.Errorf("Failed to fetch remote image %s: %w", digest, err) + } + d, err := img.Digest() + if err != nil { + return nil, xerrors.Errorf("Failed to get digest for image %s: %w", image, err) + } + digest = d.String() + } + if ep, ok := cache.get(digest); ok { return ep, nil } - img, err := getRemoteImage(digest, kubeclient, taskRun) + + img, err := getRemoteImage(image, kubeclient, taskRun) if err != nil { return nil, xerrors.Errorf("Failed to fetch remote image %s: %w", digest, err) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/resources/apply.go b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/resources/apply.go index b07b0964e..456763eb3 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/resources/apply.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/resources/apply.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors. +Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,31 +18,43 @@ package resources import ( "fmt" + "path/filepath" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - "github.com/tektoncd/pipeline/pkg/templating" - corev1 "k8s.io/api/core/v1" ) // ApplyParameters applies the params from a TaskRun.Input.Parameters to a TaskSpec func ApplyParameters(spec *v1alpha1.TaskSpec, tr *v1alpha1.TaskRun, defaults ...v1alpha1.ParamSpec) *v1alpha1.TaskSpec { // This assumes that the TaskRun inputs have been validated against what the Task requests. 
- replacements := map[string]string{} - // Set all the default replacements + + // stringReplacements is used for standard single-string stringReplacements, while arrayReplacements contains arrays + // that need to be further processed. + stringReplacements := map[string]string{} + arrayReplacements := map[string][]string{} + + // Set all the default stringReplacements for _, p := range defaults { - if p.Default != "" { - replacements[fmt.Sprintf("inputs.params.%s", p.Name)] = p.Default + if p.Default != nil { + if p.Default.Type == v1alpha1.ParamTypeString { + stringReplacements[fmt.Sprintf("inputs.params.%s", p.Name)] = p.Default.StringVal + } else { + arrayReplacements[fmt.Sprintf("inputs.params.%s", p.Name)] = p.Default.ArrayVal + } } } // Set and overwrite params with the ones from the TaskRun for _, p := range tr.Spec.Inputs.Params { - replacements[fmt.Sprintf("inputs.params.%s", p.Name)] = p.Value + if p.Value.Type == v1alpha1.ParamTypeString { + stringReplacements[fmt.Sprintf("inputs.params.%s", p.Name)] = p.Value.StringVal + } else { + arrayReplacements[fmt.Sprintf("inputs.params.%s", p.Name)] = p.Value.ArrayVal + } } - return ApplyReplacements(spec, replacements) + return ApplyReplacements(spec, stringReplacements, arrayReplacements) } -// ApplyResources applies the templating from values in resources which are referenced in spec as subitems +// ApplyResources applies the substitution from values in resources which are referenced in spec as subitems // of the replacementStr. 
func ApplyResources(spec *v1alpha1.TaskSpec, resolvedResources map[string]v1alpha1.PipelineResourceInterface, replacementStr string) *v1alpha1.TaskSpec { replacements := map[string]string{} @@ -51,82 +63,57 @@ func ApplyResources(spec *v1alpha1.TaskSpec, resolvedResources map[string]v1alph replacements[fmt.Sprintf("%s.resources.%s.%s", replacementStr, name, k)] = v } } - return ApplyReplacements(spec, replacements) + + // We always add replacements for 'path' + if spec.Inputs != nil { + for _, r := range spec.Inputs.Resources { + replacements[fmt.Sprintf("inputs.resources.%s.path", r.Name)] = path("/workspace", r) + } + } + if spec.Outputs != nil { + for _, r := range spec.Outputs.Resources { + replacements[fmt.Sprintf("outputs.resources.%s.path", r.Name)] = path("/workspace/output", r) + } + } + + return ApplyReplacements(spec, replacements, map[string][]string{}) +} + +func path(root string, r v1alpha1.TaskResource) string { + if r.TargetPath != "" { + return filepath.Join("/workspace", r.TargetPath) + } + return filepath.Join(root, r.Name) } // ApplyReplacements replaces placeholders for declared parameters with the specified replacements. -func ApplyReplacements(spec *v1alpha1.TaskSpec, replacements map[string]string) *v1alpha1.TaskSpec { +func ApplyReplacements(spec *v1alpha1.TaskSpec, stringReplacements map[string]string, arrayReplacements map[string][]string) *v1alpha1.TaskSpec { spec = spec.DeepCopy() // Apply variable expansion to steps fields. steps := spec.Steps for i := range steps { - applyContainerReplacements(&steps[i], replacements) - } - - // Apply variable expansion to containerTemplate fields. - // Should eventually be removed; ContainerTemplate is the deprecated previous name of the StepTemplate field (#977). - if spec.ContainerTemplate != nil { - applyContainerReplacements(spec.ContainerTemplate, replacements) + v1alpha1.ApplyStepReplacements(&steps[i], stringReplacements, arrayReplacements) } // Apply variable expansion to stepTemplate fields. 
if spec.StepTemplate != nil { - applyContainerReplacements(spec.StepTemplate, replacements) + v1alpha1.ApplyStepReplacements(&v1alpha1.Step{Container: *spec.StepTemplate}, stringReplacements, arrayReplacements) } // Apply variable expansion to the build's volumes for i, v := range spec.Volumes { - spec.Volumes[i].Name = templating.ApplyReplacements(v.Name, replacements) + spec.Volumes[i].Name = v1alpha1.ApplyReplacements(v.Name, stringReplacements) if v.VolumeSource.ConfigMap != nil { - spec.Volumes[i].ConfigMap.Name = templating.ApplyReplacements(v.ConfigMap.Name, replacements) + spec.Volumes[i].ConfigMap.Name = v1alpha1.ApplyReplacements(v.ConfigMap.Name, stringReplacements) } if v.VolumeSource.Secret != nil { - spec.Volumes[i].Secret.SecretName = templating.ApplyReplacements(v.Secret.SecretName, replacements) + spec.Volumes[i].Secret.SecretName = v1alpha1.ApplyReplacements(v.Secret.SecretName, stringReplacements) } if v.PersistentVolumeClaim != nil { - spec.Volumes[i].PersistentVolumeClaim.ClaimName = templating.ApplyReplacements(v.PersistentVolumeClaim.ClaimName, replacements) + spec.Volumes[i].PersistentVolumeClaim.ClaimName = v1alpha1.ApplyReplacements(v.PersistentVolumeClaim.ClaimName, stringReplacements) } } return spec } - -func applyContainerReplacements(container *corev1.Container, replacements map[string]string) { - container.Name = templating.ApplyReplacements(container.Name, replacements) - container.Image = templating.ApplyReplacements(container.Image, replacements) - for ia, a := range container.Args { - container.Args[ia] = templating.ApplyReplacements(a, replacements) - } - for ie, e := range container.Env { - container.Env[ie].Value = templating.ApplyReplacements(e.Value, replacements) - if container.Env[ie].ValueFrom != nil { - if e.ValueFrom.SecretKeyRef != nil { - container.Env[ie].ValueFrom.SecretKeyRef.LocalObjectReference.Name = templating.ApplyReplacements(e.ValueFrom.SecretKeyRef.LocalObjectReference.Name, replacements) - 
container.Env[ie].ValueFrom.SecretKeyRef.Key = templating.ApplyReplacements(e.ValueFrom.SecretKeyRef.Key, replacements) - } - if e.ValueFrom.ConfigMapKeyRef != nil { - container.Env[ie].ValueFrom.ConfigMapKeyRef.LocalObjectReference.Name = templating.ApplyReplacements(e.ValueFrom.ConfigMapKeyRef.LocalObjectReference.Name, replacements) - container.Env[ie].ValueFrom.ConfigMapKeyRef.Key = templating.ApplyReplacements(e.ValueFrom.ConfigMapKeyRef.Key, replacements) - } - } - } - for ie, e := range container.EnvFrom { - container.EnvFrom[ie].Prefix = templating.ApplyReplacements(e.Prefix, replacements) - if e.ConfigMapRef != nil { - container.EnvFrom[ie].ConfigMapRef.LocalObjectReference.Name = templating.ApplyReplacements(e.ConfigMapRef.LocalObjectReference.Name, replacements) - } - if e.SecretRef != nil { - container.EnvFrom[ie].SecretRef.LocalObjectReference.Name = templating.ApplyReplacements(e.SecretRef.LocalObjectReference.Name, replacements) - } - } - container.WorkingDir = templating.ApplyReplacements(container.WorkingDir, replacements) - for ic, c := range container.Command { - container.Command[ic] = templating.ApplyReplacements(c, replacements) - } - for iv, v := range container.VolumeMounts { - container.VolumeMounts[iv].Name = templating.ApplyReplacements(v.Name, replacements) - container.VolumeMounts[iv].MountPath = templating.ApplyReplacements(v.MountPath, replacements) - container.VolumeMounts[iv].SubPath = templating.ApplyReplacements(v.SubPath, replacements) - } -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/resources/image_exporter.go b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/resources/image_exporter.go index 868e3681e..0f7be8b87 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/resources/image_exporter.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/resources/image_exporter.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton 
Authors. +Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -69,16 +69,14 @@ func AddOutputImageDigestExporter( } if len(output) > 0 { - augmentedSteps := []corev1.Container{} + augmentedSteps := []v1alpha1.Step{} imagesJSON, err := json.Marshal(output) if err != nil { return xerrors.Errorf("Failed to format image resource data for output image exporter: %w", err) } - for _, s := range taskSpec.Steps { - augmentedSteps = append(augmentedSteps, s) - augmentedSteps = append(augmentedSteps, imageDigestExporterContainer(s.Name, imagesJSON)) - } + augmentedSteps = append(augmentedSteps, taskSpec.Steps...) + augmentedSteps = append(augmentedSteps, imageDigestExporterStep(imagesJSON)) taskSpec.Steps = augmentedSteps } @@ -97,9 +95,9 @@ func UpdateTaskRunStatusWithResourceResult(taskRun *v1alpha1.TaskRun, logContent return nil } -func imageDigestExporterContainer(stepName string, imagesJSON []byte) corev1.Container { - return corev1.Container{ - Name: names.SimpleNameGenerator.RestrictLengthWithRandomSuffix("image-digest-exporter-" + stepName), +func imageDigestExporterStep(imagesJSON []byte) v1alpha1.Step { + return v1alpha1.Step{Container: corev1.Container{ + Name: names.SimpleNameGenerator.RestrictLengthWithRandomSuffix("image-digest-exporter"), Image: *imageDigestExporterImage, Command: []string{"/ko-app/imagedigestexporter"}, Args: []string{ @@ -108,7 +106,7 @@ func imageDigestExporterContainer(stepName string, imagesJSON []byte) corev1.Con }, TerminationMessagePath: TerminationMessagePath, TerminationMessagePolicy: corev1.TerminationMessageFallbackToLogsOnError, - } + }} } // TaskRunHasOutputImageResource return true if the task has any output resources of type image diff --git a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/resources/input_resources.go 
b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/resources/input_resources.go index a40577dd7..cb8373162 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/resources/input_resources.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/resources/input_resources.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors. +Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,7 +17,6 @@ limitations under the License. package resources import ( - "fmt" "path/filepath" "github.com/tektoncd/pipeline/pkg/apis/pipeline" @@ -71,8 +70,7 @@ func AddInputResource( return nil, err } - allResourceContainers := []corev1.Container{} - + var allResourceSteps []v1alpha1.Step for _, input := range taskSpec.Inputs.Resources { boundResource, err := getBoundResource(input.Name, taskRun.Spec.Inputs.Resources) if err != nil { @@ -82,29 +80,25 @@ func AddInputResource( if !ok || resource == nil { return nil, xerrors.Errorf("failed to Get Pipeline Resource for task %s with boundResource %v", taskName, boundResource) } - var ( - resourceContainers []corev1.Container - resourceVolumes []corev1.Volume - copyStepsFromPrevTasks []corev1.Container - dPath = destinationPath(input.Name, input.TargetPath) - ) - resource.SetDestinationDirectory(dPath) + var resourceVolumes []corev1.Volume + var copyStepsFromPrevTasks []v1alpha1.Step + dPath := destinationPath(input.Name, input.TargetPath) // if taskrun is fetching resource from previous task then execute copy step instead of fetching new copy // to the desired destination directory, as long as the resource exports output to be copied if allowedOutputResources[resource.GetType()] && taskRun.HasPipelineRunOwnerReference() { for _, path := range boundResource.Paths { - cpContainers := as.GetCopyFromStorageToContainerSpec(boundResource.Name, path, dPath) + cpSteps := 
as.GetCopyFromStorageToSteps(boundResource.Name, path, dPath) if as.GetType() == v1alpha1.ArtifactStoragePVCType { - mountPVC = true - for _, ct := range cpContainers { - ct.VolumeMounts = []corev1.VolumeMount{v1alpha1.GetPvcMount(pvcName)} - createAndCopyContainers := []corev1.Container{v1alpha1.CreateDirContainer(boundResource.Name, dPath), ct} - copyStepsFromPrevTasks = append(copyStepsFromPrevTasks, createAndCopyContainers...) + for _, s := range cpSteps { + s.VolumeMounts = []corev1.VolumeMount{v1alpha1.GetPvcMount(pvcName)} + copyStepsFromPrevTasks = append(copyStepsFromPrevTasks, + v1alpha1.CreateDirStep(boundResource.Name, dPath), + s) } } else { // bucket - copyStepsFromPrevTasks = append(copyStepsFromPrevTasks, cpContainers...) + copyStepsFromPrevTasks = append(copyStepsFromPrevTasks, cpSteps...) } } } @@ -113,32 +107,20 @@ func AddInputResource( taskSpec.Steps = append(copyStepsFromPrevTasks, taskSpec.Steps...) taskSpec.Volumes = append(taskSpec.Volumes, as.GetSecretsVolumes()...) 
} else { - switch resource.GetType() { - case v1alpha1.PipelineResourceTypeStorage: - { - storageResource, ok := resource.(v1alpha1.PipelineStorageResourceInterface) - if !ok { - return nil, xerrors.Errorf("task %q invalid gcs Pipeline Resource: %q", taskName, boundResource.ResourceRef.Name) - } - resourceContainers, resourceVolumes, err = addStorageFetchStep(taskSpec, storageResource) - if err != nil { - return nil, xerrors.Errorf("task %q invalid gcs Pipeline Resource download steps: %q: %w", taskName, boundResource.ResourceRef.Name, err) - } - } - default: - { - resourceContainers, err = resource.GetDownloadContainerSpec() - if err != nil { - return nil, xerrors.Errorf("task %q invalid resource download spec: %q; error %w", taskName, boundResource.ResourceRef.Name, err) - } - } + resourceSteps, err := resource.GetDownloadSteps(dPath) + if err != nil { + return nil, xerrors.Errorf("task %q invalid resource download spec: %q; error %w", taskName, boundResource.ResourceRef.Name, err) + } + resourceVolumes, err = resource.GetDownloadVolumeSpec(taskSpec) + if err != nil { + return nil, xerrors.Errorf("task %q invalid resource download spec: %q; error %w", taskName, boundResource.ResourceRef.Name, err) } - allResourceContainers = append(allResourceContainers, resourceContainers...) + allResourceSteps = append(allResourceSteps, resourceSteps...) taskSpec.Volumes = append(taskSpec.Volumes, resourceVolumes...) } } - taskSpec.Steps = append(allResourceContainers, taskSpec.Steps...) + taskSpec.Steps = append(allResourceSteps, taskSpec.Steps...) 
if mountPVC { taskSpec.Volumes = append(taskSpec.Volumes, GetPVCVolume(pvcName)) @@ -146,38 +128,6 @@ func AddInputResource( return taskSpec, nil } -func addStorageFetchStep(taskSpec *v1alpha1.TaskSpec, storageResource v1alpha1.PipelineStorageResourceInterface) ([]corev1.Container, []corev1.Volume, error) { - gcsContainers, err := storageResource.GetDownloadContainerSpec() - if err != nil { - return nil, nil, err - } - - var storageVol []corev1.Volume - mountedSecrets := map[string]string{} - for _, volume := range taskSpec.Volumes { - mountedSecrets[volume.Name] = "" - } - - for _, secretParam := range storageResource.GetSecretParams() { - volName := fmt.Sprintf("volume-%s-%s", storageResource.GetName(), secretParam.SecretName) - - gcsSecretVolume := corev1.Volume{ - Name: volName, - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: secretParam.SecretName, - }, - }, - } - - if _, ok := mountedSecrets[volName]; !ok { - storageVol = append(storageVol, gcsSecretVolume) - mountedSecrets[volName] = "" - } - } - return gcsContainers, storageVol, nil -} - func getResource(r *v1alpha1.TaskResourceBinding, getter GetResource) (*v1alpha1.PipelineResource, error) { // Check both resource ref or resource Spec are not present. Taskrun webhook should catch this in validation error. if r.ResourceRef.Name != "" && r.ResourceSpec != nil { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/resources/output_resource.go b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/resources/output_resource.go index d50152766..9633ef249 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/resources/output_resource.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/resources/output_resource.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors. 
+Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,14 +17,12 @@ limitations under the License. package resources import ( - "fmt" "path/filepath" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" "github.com/tektoncd/pipeline/pkg/artifacts" "go.uber.org/zap" "golang.org/x/xerrors" - corev1 "k8s.io/api/core/v1" "k8s.io/client-go/kubernetes" ) @@ -91,57 +89,44 @@ func AddOutputResources( if !ok || resource == nil { return nil, xerrors.Errorf("failed to get output pipeline Resource for task %q resource %v", taskName, boundResource) } - var ( - resourceContainers []corev1.Container - resourceVolumes []corev1.Volume - ) + // if resource is declared in input then copy outputs to pvc // To build copy step it needs source path(which is targetpath of input resourcemap) from task input source sourcePath := inputResourceMap[boundResource.Name] - if sourcePath == "" { + if sourcePath != "" { + logger.Warn(`This task uses the same resource as an input and output. The behavior of this will change in a future release. 
+ See https://github.com/tektoncd/pipeline/issues/1118 for more information.`) + } else { if output.TargetPath == "" { sourcePath = filepath.Join(outputDir, boundResource.Name) } else { sourcePath = output.TargetPath } } - resource.SetDestinationDirectory(sourcePath) - switch resource.GetType() { - case v1alpha1.PipelineResourceTypeStorage: - { - storageResource, ok := resource.(v1alpha1.PipelineStorageResourceInterface) - if !ok { - return nil, xerrors.Errorf("task %q invalid storage Pipeline Resource: %q", - taskName, - boundResource.ResourceRef.Name, - ) - } - resourceContainers, resourceVolumes, err = addStoreUploadStep(taskSpec, storageResource) - if err != nil { - return nil, xerrors.Errorf("task %q invalid Pipeline Resource: %q; invalid upload steps err: %w", - taskName, boundResource.ResourceRef.Name, err) - } - } - default: - { - resourceContainers, err = resource.GetUploadContainerSpec() - if err != nil { - return nil, xerrors.Errorf("task %q invalid download spec: %q; error %w", taskName, boundResource.ResourceRef.Name, err) - } - } + + resourceSteps, err := resource.GetUploadSteps(sourcePath) + if err != nil { + return nil, xerrors.Errorf("task %q invalid upload spec: %q; error %w", taskName, boundResource.ResourceRef.Name, err) + } + + resourceVolumes, err := resource.GetUploadVolumeSpec(taskSpec) + if err != nil { + return nil, xerrors.Errorf("task %q invalid upload spec: %q; error %w", taskName, boundResource.ResourceRef.Name, err) } if allowedOutputResources[resource.GetType()] && taskRun.HasPipelineRunOwnerReference() { - var newSteps []corev1.Container + var newSteps []v1alpha1.Step for _, dPath := range boundResource.Paths { - containers := as.GetCopyToStorageFromContainerSpec(resource.GetName(), sourcePath, dPath) - newSteps = append(newSteps, containers...) + newSteps = append(newSteps, as.GetCopyToStorageFromSteps(resource.GetName(), sourcePath, dPath)...) } - resourceContainers = append(resourceContainers, newSteps...) 
+ resourceSteps = append(resourceSteps, newSteps...) resourceVolumes = append(resourceVolumes, as.GetSecretsVolumes()...) } - taskSpec.Steps = append(taskSpec.Steps, resourceContainers...) + // Add containers to mkdir each output directory. This should run before the build steps themselves. + mkdirSteps := []v1alpha1.Step{v1alpha1.CreateDirStep(boundResource.Name, sourcePath)} + taskSpec.Steps = append(mkdirSteps, taskSpec.Steps...) + taskSpec.Steps = append(taskSpec.Steps, resourceSteps...) taskSpec.Volumes = append(taskSpec.Volumes, resourceVolumes...) if as.GetType() == v1alpha1.ArtifactStoragePVCType { @@ -160,39 +145,3 @@ func AddOutputResources( } return taskSpec, nil } - -func addStoreUploadStep(spec *v1alpha1.TaskSpec, - storageResource v1alpha1.PipelineStorageResourceInterface, -) ([]corev1.Container, []corev1.Volume, error) { - - gcsContainers, err := storageResource.GetUploadContainerSpec() - if err != nil { - return nil, nil, err - } - var storageVol []corev1.Volume - mountedSecrets := map[string]string{} - - for _, volume := range spec.Volumes { - mountedSecrets[volume.Name] = "" - } - - // Map holds list of secrets that are mounted as volumes - for _, secretParam := range storageResource.GetSecretParams() { - volName := fmt.Sprintf("volume-%s-%s", storageResource.GetName(), secretParam.SecretName) - - gcsSecretVolume := corev1.Volume{ - Name: volName, - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: secretParam.SecretName, - }, - }, - } - - if _, ok := mountedSecrets[volName]; !ok { - storageVol = append(storageVol, gcsSecretVolume) - mountedSecrets[volName] = "" - } - } - return gcsContainers, storageVol, nil -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/resources/pod.go b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/resources/pod.go index 27e96302c..197d3785c 100644 --- 
a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/resources/pod.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/resources/pod.go @@ -29,22 +29,19 @@ import ( "sort" "strings" - "go.uber.org/zap" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/kubernetes" - "github.com/tektoncd/pipeline/pkg/apis/pipeline" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" "github.com/tektoncd/pipeline/pkg/credentials" "github.com/tektoncd/pipeline/pkg/credentials/dockercreds" "github.com/tektoncd/pipeline/pkg/credentials/gitcreds" - "github.com/tektoncd/pipeline/pkg/merge" "github.com/tektoncd/pipeline/pkg/names" "github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/entrypoint" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes" ) const ( @@ -107,7 +104,7 @@ var ( "The container image for preparing our Build's credentials.") ) -func makeCredentialInitializer(serviceAccountName, namespace string, kubeclient kubernetes.Interface) (*corev1.Container, []corev1.Volume, error) { +func makeCredentialInitializer(serviceAccountName, namespace string, kubeclient kubernetes.Interface) (*v1alpha1.Step, []corev1.Volume, error) { if serviceAccountName == "" { serviceAccountName = "default" } @@ -154,7 +151,7 @@ func makeCredentialInitializer(serviceAccountName, namespace string, kubeclient } } - return &corev1.Container{ + return &v1alpha1.Step{Container: corev1.Container{ Name: names.SimpleNameGenerator.RestrictLengthWithRandomSuffix(containerPrefix + credsInit), Image: *credsImage, Command: []string{"/ko-app/creds-init"}, @@ -162,7 +159,7 @@ func 
makeCredentialInitializer(serviceAccountName, namespace string, kubeclient VolumeMounts: volumeMounts, Env: implicitEnvVars, WorkingDir: workspaceDir, - }, volumes, nil + }}, volumes, nil } func makeWorkingDirScript(workingDirs map[string]bool) string { @@ -190,14 +187,14 @@ func makeWorkingDirScript(workingDirs map[string]bool) string { return script } -func makeWorkingDirInitializer(steps []corev1.Container) *corev1.Container { +func makeWorkingDirInitializer(steps []v1alpha1.Step) *v1alpha1.Step { workingDirs := make(map[string]bool) for _, step := range steps { workingDirs[step.WorkingDir] = true } if script := makeWorkingDirScript(workingDirs); script != "" { - return &corev1.Container{ + return &v1alpha1.Step{Container: corev1.Container{ Name: names.SimpleNameGenerator.RestrictLengthWithRandomSuffix(containerPrefix + workingDirInit), Image: *v1alpha1.BashNoopImage, Command: []string{"/ko-app/bash"}, @@ -205,24 +202,23 @@ func makeWorkingDirInitializer(steps []corev1.Container) *corev1.Container { VolumeMounts: implicitVolumeMounts, Env: implicitEnvVars, WorkingDir: workspaceDir, - } + }} } - return nil } // initOutputResourcesDefaultDir checks if there are any output image resources expecting a default path // and creates an init container to create that folder -func initOutputResourcesDefaultDir(taskRun *v1alpha1.TaskRun, taskSpec v1alpha1.TaskSpec) []corev1.Container { - makeDirSteps := []corev1.Container{} +func initOutputResourcesDefaultDir(taskRun *v1alpha1.TaskRun, taskSpec v1alpha1.TaskSpec) []v1alpha1.Step { + var makeDirSteps []v1alpha1.Step if len(taskRun.Spec.Outputs.Resources) > 0 { for _, r := range taskRun.Spec.Outputs.Resources { for _, o := range taskSpec.Outputs.Resources { if o.Name == r.Name { if strings.HasPrefix(o.OutputImageDir, v1alpha1.TaskOutputImageDefaultDir) { - cn := v1alpha1.CreateDirContainer("default-image-output", fmt.Sprintf("%s/%s", v1alpha1.TaskOutputImageDefaultDir, r.Name)) - cn.VolumeMounts = append(cn.VolumeMounts, 
implicitVolumeMounts...) - makeDirSteps = append(makeDirSteps, cn) + s := v1alpha1.CreateDirStep("default-image-output", fmt.Sprintf("%s/%s", v1alpha1.TaskOutputImageDefaultDir, r.Name)) + s.VolumeMounts = append(s.VolumeMounts, implicitVolumeMounts...) + makeDirSteps = append(makeDirSteps, s) } } } @@ -250,58 +246,59 @@ func TryGetPod(taskRunStatus v1alpha1.TaskRunStatus, gp GetPod) (*corev1.Pod, er // MakePod converts TaskRun and TaskSpec objects to a Pod which implements the taskrun specified // by the supplied CRD. -func MakePod(taskRun *v1alpha1.TaskRun, taskSpec v1alpha1.TaskSpec, kubeclient kubernetes.Interface, cache *entrypoint.Cache, logger *zap.SugaredLogger) (*corev1.Pod, error) { +func MakePod(taskRun *v1alpha1.TaskRun, taskSpec v1alpha1.TaskSpec, kubeclient kubernetes.Interface) (*corev1.Pod, error) { cred, secrets, err := makeCredentialInitializer(taskRun.Spec.ServiceAccount, taskRun.Namespace, kubeclient) if err != nil { return nil, err } - initContainers := []corev1.Container{*cred} - podContainers := []corev1.Container{} + initSteps := []v1alpha1.Step{*cred} + var podSteps []v1alpha1.Step if workingDir := makeWorkingDirInitializer(taskSpec.Steps); workingDir != nil { - initContainers = append(initContainers, *workingDir) + initSteps = append(initSteps, *workingDir) } - initContainers = append(initContainers, initOutputResourcesDefaultDir(taskRun, taskSpec)...) + initSteps = append(initSteps, initOutputResourcesDefaultDir(taskRun, taskSpec)...) maxIndicesByResource := findMaxResourceRequest(taskSpec.Steps, corev1.ResourceCPU, corev1.ResourceMemory, corev1.ResourceEphemeralStorage) - for i := range taskSpec.Steps { - step := &taskSpec.Steps[i] - step.Env = append(implicitEnvVars, step.Env...) + for i, s := range taskSpec.Steps { + s.Env = append(implicitEnvVars, s.Env...) // TODO(mattmoor): Check that volumeMounts match volumes. // Add implicit volume mounts, unless the user has requested // their own volume mount at that path. 
requestedVolumeMounts := map[string]bool{} - for _, vm := range step.VolumeMounts { + for _, vm := range s.VolumeMounts { requestedVolumeMounts[filepath.Clean(vm.MountPath)] = true } for _, imp := range implicitVolumeMounts { if !requestedVolumeMounts[filepath.Clean(imp.MountPath)] { - step.VolumeMounts = append(step.VolumeMounts, imp) + s.VolumeMounts = append(s.VolumeMounts, imp) } } - if step.WorkingDir == "" { - step.WorkingDir = workspaceDir + if s.WorkingDir == "" { + s.WorkingDir = workspaceDir } - if step.Name == "" { - step.Name = fmt.Sprintf("%v%d", unnamedInitContainerPrefix, i) + if s.Name == "" { + s.Name = fmt.Sprintf("%v%d", unnamedInitContainerPrefix, i) } else { - step.Name = names.SimpleNameGenerator.RestrictLength(fmt.Sprintf("%v%v", containerPrefix, step.Name)) + s.Name = names.SimpleNameGenerator.RestrictLength(fmt.Sprintf("%v%v", containerPrefix, s.Name)) } - // use the step name to add the entrypoint biary as an init container - if step.Name == names.SimpleNameGenerator.RestrictLength(fmt.Sprintf("%v%v", containerPrefix, entrypoint.InitContainerName)) { - initContainers = append(initContainers, *step) + // use the container name to add the entrypoint biary as an init container + if s.Name == names.SimpleNameGenerator.RestrictLength(fmt.Sprintf("%v%v", containerPrefix, entrypoint.InitContainerName)) { + initSteps = append(initSteps, s) } else { - zeroNonMaxResourceRequests(step, i, maxIndicesByResource) - podContainers = append(podContainers, *step) + zeroNonMaxResourceRequests(&s, i, maxIndicesByResource) + podSteps = append(podSteps, s) } } + // Add podTemplate Volumes to the explicitly declared use volumes + volumes := append(taskSpec.Volumes, taskRun.Spec.PodTemplate.Volumes...) // Add our implicit volumes and any volumes needed for secrets to the explicitly // declared user volumes. - volumes := append(taskSpec.Volumes, implicitVolumes...) + volumes = append(volumes, implicitVolumes...) volumes = append(volumes, secrets...) 
if err := v1alpha1.ValidateVolumes(volumes); err != nil { return nil, err @@ -314,25 +311,25 @@ func MakePod(taskRun *v1alpha1.TaskRun, taskSpec v1alpha1.TaskSpec, kubeclient k } gibberish := hex.EncodeToString(b) - mergedInitContainers, err := merge.CombineStepsWithStepTemplate(taskSpec.StepTemplate, initContainers) + mergedInitSteps, err := v1alpha1.MergeStepsWithStepTemplate(taskSpec.StepTemplate, initSteps) if err != nil { return nil, err } - mergedPodContainers, err := merge.CombineStepsWithStepTemplate(taskSpec.StepTemplate, podContainers) - if err != nil { - return nil, err + var mergedInitContainers []corev1.Container + for _, s := range mergedInitSteps { + mergedInitContainers = append(mergedInitContainers, s.Container) } - - // The ContainerTemplate field is deprecated (#977) - mergedInitContainers, err = merge.CombineStepsWithStepTemplate(taskSpec.ContainerTemplate, mergedInitContainers) + mergedPodSteps, err := v1alpha1.MergeStepsWithStepTemplate(taskSpec.StepTemplate, podSteps) if err != nil { return nil, err } - mergedPodContainers, err = merge.CombineStepsWithStepTemplate(taskSpec.ContainerTemplate, mergedPodContainers) - if err != nil { - return nil, err + var mergedPodContainers []corev1.Container + for _, s := range mergedPodSteps { + mergedPodContainers = append(mergedPodContainers, s.Container) } + podTemplate := v1alpha1.CombinedPodTemplate(taskRun.Spec.PodTemplate, taskRun.Spec.NodeSelector, taskRun.Spec.Tolerations, taskRun.Spec.Affinity) + return &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ // We execute the build's pod in the same namespace as where the build was @@ -356,9 +353,10 @@ func MakePod(taskRun *v1alpha1.TaskRun, taskSpec v1alpha1.TaskSpec, kubeclient k Containers: mergedPodContainers, ServiceAccountName: taskRun.Spec.ServiceAccount, Volumes: volumes, - NodeSelector: taskRun.Spec.NodeSelector, - Tolerations: taskRun.Spec.Tolerations, - Affinity: taskRun.Spec.Affinity, + NodeSelector: podTemplate.NodeSelector, + Tolerations: 
podTemplate.Tolerations, + Affinity: podTemplate.Affinity, + SecurityContext: podTemplate.SecurityContext, }, }, nil } @@ -413,30 +411,30 @@ func makeAnnotations(s *v1alpha1.TaskRun) map[string]string { // one at a time, so we want pods to only request the maximum resources needed // at any single point in time. If no container has an explicit resource // request, all requests are set to 0. -func zeroNonMaxResourceRequests(container *corev1.Container, containerIndex int, maxIndicesByResource map[corev1.ResourceName]int) { - if container.Resources.Requests == nil { - container.Resources.Requests = corev1.ResourceList{} +func zeroNonMaxResourceRequests(step *v1alpha1.Step, stepIndex int, maxIndicesByResource map[corev1.ResourceName]int) { + if step.Resources.Requests == nil { + step.Resources.Requests = corev1.ResourceList{} } for name, maxIdx := range maxIndicesByResource { - if maxIdx != containerIndex { - container.Resources.Requests[name] = zeroQty + if maxIdx != stepIndex { + step.Resources.Requests[name] = zeroQty } } } // findMaxResourceRequest returns the index of the container with the maximum // request for the given resource from among the given set of containers. 
-func findMaxResourceRequest(containers []corev1.Container, resourceNames ...corev1.ResourceName) map[corev1.ResourceName]int { +func findMaxResourceRequest(steps []v1alpha1.Step, resourceNames ...corev1.ResourceName) map[corev1.ResourceName]int { maxIdxs := make(map[corev1.ResourceName]int, len(resourceNames)) maxReqs := make(map[corev1.ResourceName]resource.Quantity, len(resourceNames)) for _, name := range resourceNames { maxIdxs[name] = -1 maxReqs[name] = zeroQty } - for i, c := range containers { + for i, s := range steps { for _, name := range resourceNames { maxReq := maxReqs[name] - req, exists := c.Resources.Requests[name] + req, exists := s.Container.Resources.Requests[name] if exists && req.Cmp(maxReq) > 0 { maxIdxs[name] = i maxReqs[name] = req diff --git a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/resources/taskresourceresolution.go b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/resources/taskresourceresolution.go index 41710e238..9273524f0 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/resources/taskresourceresolution.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/resources/taskresourceresolution.go @@ -9,7 +9,7 @@ You may obtain a copy of the License at Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either extress or implied. +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ diff --git a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/resources/taskspec.go b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/resources/taskspec.go index 0a1a6d9f3..90ddb795a 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/resources/taskspec.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/resources/taskspec.go @@ -9,7 +9,7 @@ You may obtain a copy of the License at Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either extress or implied. +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ diff --git a/vendor/github.com/tektoncd/pipeline/pkg/templating/templating.go b/vendor/github.com/tektoncd/pipeline/pkg/templating/templating.go deleted file mode 100644 index c2ccb0c95..000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/templating/templating.go +++ /dev/null @@ -1,71 +0,0 @@ -/* - Copyright 2019 Knative Authors LLC - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package templating - -import ( - "fmt" - "regexp" - "strings" - - "github.com/knative/pkg/apis" -) - -const parameterSubstitution = "[_a-zA-Z][_a-zA-Z0-9.-]*" - -func ValidateVariable(name, value, prefix, contextPrefix, locationName, path string, vars map[string]struct{}) *apis.FieldError { - if vs, present := extractVariablesFromString(value, contextPrefix+prefix); present { - for _, v := range vs { - if _, ok := vars[v]; !ok { - return &apis.FieldError{ - Message: fmt.Sprintf("non-existent variable in %q for %s %s", value, locationName, name), - Paths: []string{path + "." + name}, - } - } - } - } - return nil -} - -func extractVariablesFromString(s, prefix string) ([]string, bool) { - pattern := fmt.Sprintf("\\$({%s.(?P%s)})", prefix, parameterSubstitution) - re := regexp.MustCompile(pattern) - matches := re.FindAllStringSubmatch(s, -1) - if len(matches) == 0 { - return []string{}, false - } - vars := make([]string, len(matches)) - for i, match := range matches { - groups := matchGroups(match, re) - // foo -> foo - // foo.bar -> foo - // foo.bar.baz -> foo - vars[i] = strings.SplitN(groups["var"], ".", 2)[0] - } - return vars, true -} - -func matchGroups(matches []string, pattern *regexp.Regexp) map[string]string { - groups := make(map[string]string) - for i, name := range pattern.SubexpNames()[1:] { - groups[name] = matches[i+1] - } - return groups -} - -func ApplyReplacements(in string, replacements map[string]string) string { - for k, v := range replacements { - in = strings.Replace(in, fmt.Sprintf("${%s}", k), v, -1) - } - return in -} diff --git a/vendor/github.com/tektoncd/pipeline/test/adoc.go b/vendor/github.com/tektoncd/pipeline/test/adoc.go index 02148db8e..3deae9765 100644 --- a/vendor/github.com/tektoncd/pipeline/test/adoc.go +++ b/vendor/github.com/tektoncd/pipeline/test/adoc.go @@ -1,9 +1,12 @@ /* -Copyright 2018 Knative Authors LLC +Copyright 2019 The Tekton Authors + Licensed under the Apache License, Version 2.0 (the "License"); you may 
not use this file except in compliance with the License. You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. diff --git a/vendor/github.com/tektoncd/pipeline/test/build_logs.go b/vendor/github.com/tektoncd/pipeline/test/build_logs.go index b30733e94..65c0cbf74 100644 --- a/vendor/github.com/tektoncd/pipeline/test/build_logs.go +++ b/vendor/github.com/tektoncd/pipeline/test/build_logs.go @@ -21,10 +21,10 @@ import ( "io/ioutil" "strings" - "github.com/knative/pkg/test/logging" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" + "knative.dev/pkg/test/logging" ) // CollectPodLogs will get the logs for all containers in a Pod diff --git a/vendor/github.com/tektoncd/pipeline/test/builder/README.md b/vendor/github.com/tektoncd/pipeline/test/builder/README.md index b69fc1ed4..09246a853 100644 --- a/vendor/github.com/tektoncd/pipeline/test/builder/README.md +++ b/vendor/github.com/tektoncd/pipeline/test/builder/README.md @@ -94,7 +94,7 @@ func MyTest(t *testing.T) { tb.InputsParam("myarg", tb.ParamDefault("mydefault")), ), tb.Step("mycontainer", "myimage", tb.Command("/mycmd"), - tb.Args("--my-arg=${inputs.params.myarg}"), + tb.Args("--my-arg=$(inputs.params.myarg)"), ), ), )) diff --git a/vendor/github.com/tektoncd/pipeline/test/builder/condition.go b/vendor/github.com/tektoncd/pipeline/test/builder/condition.go new file mode 100644 index 000000000..2d1c38897 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/test/builder/condition.go @@ -0,0 +1,84 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" +) + +// ConditionOp is an operation which modifies a Condition struct. +type ConditionOp func(*v1alpha1.Condition) + +// ConditionSpecOp is an operation which modifies a ConditionSpec struct. +type ConditionSpecOp func(spec *v1alpha1.ConditionSpec) + +// Condition creates a Condition with default values. +// Any number of Condition modifiers can be passed to transform it. +func Condition(name, namespace string, ops ...ConditionOp) *v1alpha1.Condition { + condition := &v1alpha1.Condition{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + } + for _, op := range ops { + op(condition) + } + return condition +} + +// ConditionSpec creates a ConditionSpec with default values. +// Any number of ConditionSpec modifiers can be passed to transform it. +func ConditionSpec(ops ...ConditionSpecOp) ConditionOp { + return func(Condition *v1alpha1.Condition) { + ConditionSpec := &Condition.Spec + for _, op := range ops { + op(ConditionSpec) + } + Condition.Spec = *ConditionSpec + } +} + +// ConditionSpecCheck adds a Container, with the specified name and image, to the Condition Spec Check. +// Any number of Container modifiers can be passed to transform it. 
+func ConditionSpecCheck(name, image string, ops ...ContainerOp) ConditionSpecOp { + return func(spec *v1alpha1.ConditionSpec) { + c := &corev1.Container{ + Name: name, + Image: image, + } + for _, op := range ops { + op(c) + } + spec.Check = *c + } +} + +// ConditionParamSpec adds a param, with specified name, to the Spec. +// Any number of ParamSpec modifiers can be passed to transform it. +func ConditionParamSpec(name string, pt v1alpha1.ParamType, ops ...ParamSpecOp) ConditionSpecOp { + return func(ps *v1alpha1.ConditionSpec) { + pp := &v1alpha1.ParamSpec{Name: name, Type: pt} + for _, op := range ops { + op(pp) + } + ps.Params = append(ps.Params, *pp) + } +} diff --git a/vendor/github.com/tektoncd/pipeline/test/builder/container.go b/vendor/github.com/tektoncd/pipeline/test/builder/container.go index 2d6ce7e0a..41ffcef29 100644 --- a/vendor/github.com/tektoncd/pipeline/test/builder/container.go +++ b/vendor/github.com/tektoncd/pipeline/test/builder/container.go @@ -1,9 +1,12 @@ /* Copyright 2019 The Tekton Authors + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. diff --git a/vendor/github.com/tektoncd/pipeline/test/builder/doc.go b/vendor/github.com/tektoncd/pipeline/test/builder/doc.go index 025ad4c19..968521d63 100644 --- a/vendor/github.com/tektoncd/pipeline/test/builder/doc.go +++ b/vendor/github.com/tektoncd/pipeline/test/builder/doc.go @@ -1,9 +1,12 @@ /* Copyright 2019 The Tekton Authors + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. diff --git a/vendor/github.com/tektoncd/pipeline/test/builder/owner_reference.go b/vendor/github.com/tektoncd/pipeline/test/builder/owner_reference.go index b0ab58efc..8029d5d28 100644 --- a/vendor/github.com/tektoncd/pipeline/test/builder/owner_reference.go +++ b/vendor/github.com/tektoncd/pipeline/test/builder/owner_reference.go @@ -1,9 +1,12 @@ /* Copyright 2019 The Tekton Authors + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. diff --git a/vendor/github.com/tektoncd/pipeline/test/builder/param.go b/vendor/github.com/tektoncd/pipeline/test/builder/param.go new file mode 100644 index 000000000..bcc7ca54c --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/test/builder/param.go @@ -0,0 +1,50 @@ +/* +Copyright 2019 The Tekton Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package builder + +import "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + +// ParamSpecOp is an operation which modify a ParamSpec struct. +type ParamSpecOp func(*v1alpha1.ParamSpec) + +// ArrayOrString creates an ArrayOrString of type ParamTypeString or ParamTypeArray, based on +// how many inputs are given (>1 input will create an array, not string). +func ArrayOrString(value string, additionalValues ...string) *v1alpha1.ArrayOrString { + if len(additionalValues) > 0 { + additionalValues = append([]string{value}, additionalValues...) + return &v1alpha1.ArrayOrString{ + Type: v1alpha1.ParamTypeArray, + ArrayVal: additionalValues, + } + } + return &v1alpha1.ArrayOrString{ + Type: v1alpha1.ParamTypeString, + StringVal: value, + } +} + +// ParamSpecDescription sets the description of a ParamSpec. +func ParamSpecDescription(desc string) ParamSpecOp { + return func(ps *v1alpha1.ParamSpec) { + ps.Description = desc + } +} + +// ParamSpecDefault sets the default value of a ParamSpec. +func ParamSpecDefault(value string, additionalValues ...string) ParamSpecOp { + arrayOrString := ArrayOrString(value, additionalValues...) + return func(ps *v1alpha1.ParamSpec) { + ps.Default = arrayOrString + } +} diff --git a/vendor/github.com/tektoncd/pipeline/test/builder/pipeline.go b/vendor/github.com/tektoncd/pipeline/test/builder/pipeline.go index 8d667d335..1209ca707 100644 --- a/vendor/github.com/tektoncd/pipeline/test/builder/pipeline.go +++ b/vendor/github.com/tektoncd/pipeline/test/builder/pipeline.go @@ -1,9 +1,12 @@ /* Copyright 2019 The Tekton Authors + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -16,11 +19,11 @@ package builder import ( "time" - "github.com/knative/pkg/apis" "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" ) // PipelineOp is an operation which modify a Pipeline struct. @@ -29,9 +32,6 @@ type PipelineOp func(*v1alpha1.Pipeline) // PipelineSpecOp is an operation which modify a PipelineSpec struct. type PipelineSpecOp func(*v1alpha1.PipelineSpec) -// PipelineParamOp is an operation which modify a ParamSpec struct. -type PipelineParamOp func(*v1alpha1.ParamSpec) - // PipelineTaskOp is an operation which modify a PipelineTask struct. type PipelineTaskOp func(*v1alpha1.PipelineTask) @@ -53,9 +53,12 @@ type PipelineResourceSpecOp func(*v1alpha1.PipelineResourceSpec) // PipelineTaskInputResourceOp is an operation which modifies a PipelineTaskInputResource. type PipelineTaskInputResourceOp func(*v1alpha1.PipelineTaskInputResource) -// PipelineRunStatusOp is an operation which modify a PipelineRunStatus +// PipelineRunStatusOp is an operation which modifies a PipelineRunStatus type PipelineRunStatusOp func(*v1alpha1.PipelineRunStatus) +// PipelineTaskConditionOp is an operation which modifies a PipelineTaskCondition +type PipelineTaskConditionOp func(condition *v1alpha1.PipelineTaskCondition) + // Pipeline creates a Pipeline with default values. // Any number of Pipeline modifier can be passed to transform it. 
func Pipeline(name, namespace string, ops ...PipelineOp) *v1alpha1.Pipeline { @@ -111,11 +114,11 @@ func PipelineDeclaredResource(name string, t v1alpha1.PipelineResourceType) Pipe } } -// ParamSpec adds a param, with specified name, to the Spec. -// Any number of ParamSpec modifiers can be passed to transform it. -func PipelineParam(name string, ops ...PipelineParamOp) PipelineSpecOp { +// PipelineParamSpec adds a param, with specified name and type, to the PipelineSpec. +// Any number of PipelineParamSpec modifiers can be passed to transform it. +func PipelineParamSpec(name string, pt v1alpha1.ParamType, ops ...ParamSpecOp) PipelineSpecOp { return func(ps *v1alpha1.PipelineSpec) { - pp := &v1alpha1.ParamSpec{Name: name} + pp := &v1alpha1.ParamSpec{Name: name, Type: pt} for _, op := range ops { op(pp) } @@ -123,20 +126,6 @@ func PipelineParam(name string, ops ...PipelineParamOp) PipelineSpecOp { } } -// PipelineParamDescription sets the description to the ParamSpec. -func PipelineParamDescription(desc string) PipelineParamOp { - return func(pp *v1alpha1.ParamSpec) { - pp.Description = desc - } -} - -// PipelineParamDefault sets the default value to the ParamSpec. -func PipelineParamDefault(value string) PipelineParamOp { - return func(pp *v1alpha1.ParamSpec) { - pp.Default = value - } -} - // PipelineTask adds a PipelineTask, with specified name and task name, to the PipelineSpec. // Any number of PipelineTask modifier can be passed to transform it. func PipelineTask(name, taskName string, ops ...PipelineTaskOp) PipelineSpecOp { @@ -175,12 +164,13 @@ func PipelineTaskRefKind(kind v1alpha1.TaskKind) PipelineTaskOp { } } -// PipelineTaskParam adds a Param, with specified name and value, to the PipelineTask. -func PipelineTaskParam(name, value string) PipelineTaskOp { +// PipelineTaskParam adds a ResourceParam, with specified name and value, to the PipelineTask. 
+func PipelineTaskParam(name string, value string, additionalValues ...string) PipelineTaskOp { + arrayOrString := ArrayOrString(value, additionalValues...) return func(pt *v1alpha1.PipelineTask) { pt.Params = append(pt.Params, v1alpha1.Param{ Name: name, - Value: value, + Value: *arrayOrString, }) } } @@ -227,6 +217,34 @@ func PipelineTaskOutputResource(name, resource string) PipelineTaskOp { } } +// PipelineTaskCondition adds a condition to the PipelineTask with the +// specified conditionRef. Any number of PipelineTaskCondition modifiers can be passed +// to transform it +func PipelineTaskCondition(conditionRef string, ops ...PipelineTaskConditionOp) PipelineTaskOp { + return func(pt *v1alpha1.PipelineTask) { + c := &v1alpha1.PipelineTaskCondition{ + ConditionRef: conditionRef, + } + for _, op := range ops { + op(c) + } + pt.Conditions = append(pt.Conditions, *c) + } +} + +// PipelineTaskCondition adds a parameter to a PipelineTaskCondition +func PipelineTaskConditionParam(name, val string) PipelineTaskConditionOp { + return func(condition *v1alpha1.PipelineTaskCondition) { + if condition.Params == nil { + condition.Params = []v1alpha1.Param{} + } + condition.Params = append(condition.Params, v1alpha1.Param{ + Name: name, + Value: *ArrayOrString(val), + }) + } +} + // PipelineRun creates a PipelineRun with default values. // Any number of PipelineRun modifier can be passed to transform it. func PipelineRun(name, namespace string, ops ...PipelineRunOp) *v1alpha1.PipelineRun { @@ -324,11 +342,12 @@ func PipelineRunServiceAccountTask(taskName, sa string) PipelineRunSpecOp { } // PipelineRunParam add a param, with specified name and value, to the PipelineRunSpec. -func PipelineRunParam(name, value string) PipelineRunSpecOp { +func PipelineRunParam(name string, value string, additionalValues ...string) PipelineRunSpecOp { + arrayOrString := ArrayOrString(value, additionalValues...) 
return func(prs *v1alpha1.PipelineRunSpec) { prs.Params = append(prs.Params, v1alpha1.Param{ Name: name, - Value: value, + Value: *arrayOrString, }) } } @@ -378,7 +397,7 @@ func PipelineRunStatus(ops ...PipelineRunStatusOp) PipelineRunOp { } } -// PipelineRunStatusCondition adds a Condition to the TaskRunStatus. +// PipelineRunStatusCondition adds a StatusCondition to the TaskRunStatus. func PipelineRunStatusCondition(condition apis.Condition) PipelineRunStatusOp { return func(s *v1alpha1.PipelineRunStatus) { s.Conditions = append(s.Conditions, condition) @@ -399,10 +418,13 @@ func PipelineRunCompletionTime(t time.Time) PipelineRunStatusOp { } } -// PipelineRunTaskRunsStatus sets the TaskRuns of the PipelineRunStatus. -func PipelineRunTaskRunsStatus(taskRuns map[string]*v1alpha1.PipelineRunTaskRunStatus) PipelineRunStatusOp { +// PipelineRunTaskRunsStatus sets the status of TaskRun to the PipelineRunStatus. +func PipelineRunTaskRunsStatus(taskRunName string, status *v1alpha1.PipelineRunTaskRunStatus) PipelineRunStatusOp { return func(s *v1alpha1.PipelineRunStatus) { - s.TaskRuns = taskRuns + if s.TaskRuns == nil { + s.TaskRuns = make(map[string]*v1alpha1.PipelineRunTaskRunStatus) + } + s.TaskRuns[taskRunName] = status } } @@ -434,10 +456,10 @@ func PipelineResourceSpec(resourceType v1alpha1.PipelineResourceType, ops ...Pip } } -// PipelineResourceSpecParam adds a Param, with specified name and value, to the PipelineResourceSpec. +// PipelineResourceSpecParam adds a ResourceParam, with specified name and value, to the PipelineResourceSpec. 
func PipelineResourceSpecParam(name, value string) PipelineResourceSpecOp { return func(spec *v1alpha1.PipelineResourceSpec) { - spec.Params = append(spec.Params, v1alpha1.Param{ + spec.Params = append(spec.Params, v1alpha1.ResourceParam{ Name: name, Value: value, }) diff --git a/vendor/github.com/tektoncd/pipeline/test/builder/pod.go b/vendor/github.com/tektoncd/pipeline/test/builder/pod.go index ae559dd50..8c56c96ae 100644 --- a/vendor/github.com/tektoncd/pipeline/test/builder/pod.go +++ b/vendor/github.com/tektoncd/pipeline/test/builder/pod.go @@ -1,9 +1,12 @@ /* Copyright 2019 The Tekton Authors + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. diff --git a/vendor/github.com/tektoncd/pipeline/test/builder/step.go b/vendor/github.com/tektoncd/pipeline/test/builder/step.go new file mode 100644 index 000000000..e54cc5c27 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/test/builder/step.go @@ -0,0 +1,136 @@ +/* +Copyright 2019 The Tekton Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package builder + +import ( + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" +) + +// StepOp is an operation which modifies a Container struct. +type StepOp func(*v1alpha1.Step) + +// StepCommand sets the command to the Container (step in this case). +func StepCommand(args ...string) StepOp { + return func(step *v1alpha1.Step) { + step.Command = args + } +} + +// StepArgs sets the command arguments to the Container (step in this case). +func StepArgs(args ...string) StepOp { + return func(step *v1alpha1.Step) { + step.Args = args + } +} + +// StepEnvVar add an environment variable, with specified name and value, to the Container (step). +func StepEnvVar(name, value string) StepOp { + return func(step *v1alpha1.Step) { + step.Env = append(step.Env, corev1.EnvVar{ + Name: name, + Value: value, + }) + } +} + +// StepWorkingDir sets the WorkingDir on the Container. +func StepWorkingDir(workingDir string) StepOp { + return func(step *v1alpha1.Step) { + step.WorkingDir = workingDir + } +} + +// StepVolumeMount add a VolumeMount to the Container (step). +func StepVolumeMount(name, mountPath string, ops ...VolumeMountOp) StepOp { + return func(step *v1alpha1.Step) { + mount := &corev1.VolumeMount{ + Name: name, + MountPath: mountPath, + } + for _, op := range ops { + op(mount) + } + step.VolumeMounts = append(step.VolumeMounts, *mount) + } +} + +// StepResources adds ResourceRequirements to the Container (step). +func StepResources(ops ...ResourceRequirementsOp) StepOp { + return func(step *v1alpha1.Step) { + rr := &corev1.ResourceRequirements{} + for _, op := range ops { + op(rr) + } + step.Resources = *rr + } +} + +// StepLimits adds Limits to the ResourceRequirements. 
+func StepLimits(ops ...ResourceListOp) ResourceRequirementsOp { + return func(rr *corev1.ResourceRequirements) { + limits := corev1.ResourceList{} + for _, op := range ops { + op(limits) + } + rr.Limits = limits + } +} + +// StepRequests adds Requests to the ResourceRequirements. +func StepRequests(ops ...ResourceListOp) ResourceRequirementsOp { + return func(rr *corev1.ResourceRequirements) { + requests := corev1.ResourceList{} + for _, op := range ops { + op(requests) + } + rr.Requests = requests + } +} + +// StepCPU sets the CPU resource on the ResourceList. +func StepCPU(val string) ResourceListOp { + return func(r corev1.ResourceList) { + r[corev1.ResourceCPU] = resource.MustParse(val) + } +} + +// StepMemory sets the memory resource on the ResourceList. +func StepMemory(val string) ResourceListOp { + return func(r corev1.ResourceList) { + r[corev1.ResourceMemory] = resource.MustParse(val) + } +} + +// StepEphemeralStorage sets the ephemeral storage resource on the ResourceList. +func StepEphemeralStorage(val string) ResourceListOp { + return func(r corev1.ResourceList) { + r[corev1.ResourceEphemeralStorage] = resource.MustParse(val) + } +} + +// StepTerminationMessagePath sets the source of the termination message. +func StepTerminationMessagePath(terminationMessagePath string) StepOp { + return func(step *v1alpha1.Step) { + step.TerminationMessagePath = terminationMessagePath + } +} + +// StepTerminationMessagePolicy sets the policy of the termination message. 
+func StepTerminationMessagePolicy(terminationMessagePolicy corev1.TerminationMessagePolicy) StepOp { + return func(step *v1alpha1.Step) { + step.TerminationMessagePolicy = terminationMessagePolicy + } +} diff --git a/vendor/github.com/tektoncd/pipeline/test/builder/task.go b/vendor/github.com/tektoncd/pipeline/test/builder/task.go index c2e3b6e57..6cc54e25b 100644 --- a/vendor/github.com/tektoncd/pipeline/test/builder/task.go +++ b/vendor/github.com/tektoncd/pipeline/test/builder/task.go @@ -1,9 +1,12 @@ /* Copyright 2019 The Tekton Authors + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -16,12 +19,12 @@ package builder import ( "time" - "github.com/knative/pkg/apis" "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" "github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/resources" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" ) // TaskOp is an operation which modify a Task struct. @@ -39,9 +42,6 @@ type InputsOp func(*v1alpha1.Inputs) // OutputsOp is an operation which modify an Outputs struct. type OutputsOp func(*v1alpha1.Outputs) -// TaskParamOp is an operation which modify a ParamSpec struct. -type TaskParamOp func(*v1alpha1.ParamSpec) - // TaskRunOp is an operation which modify a TaskRun struct. type TaskRunOp func(*v1alpha1.TaskRun) @@ -138,19 +138,19 @@ func TaskSpec(ops ...TaskSpecOp) TaskOp { // Step adds a step with the specified name and image to the TaskSpec. // Any number of Container modifier can be passed to transform it. 
-func Step(name, image string, ops ...ContainerOp) TaskSpecOp { +func Step(name, image string, ops ...StepOp) TaskSpecOp { return func(spec *v1alpha1.TaskSpec) { if spec.Steps == nil { - spec.Steps = []corev1.Container{} + spec.Steps = []v1alpha1.Step{} } - step := &corev1.Container{ + step := v1alpha1.Step{Container: corev1.Container{ Name: name, Image: image, - } + }} for _, op := range ops { - op(step) + op(&step) } - spec.Steps = append(spec.Steps, *step) + spec.Steps = append(spec.Steps, step) } } @@ -158,7 +158,6 @@ func Step(name, image string, ops ...ContainerOp) TaskSpecOp { func TaskStepTemplate(ops ...ContainerOp) TaskSpecOp { return func(spec *v1alpha1.TaskSpec) { base := &corev1.Container{} - for _, op := range ops { op(base) } @@ -166,19 +165,6 @@ func TaskStepTemplate(ops ...ContainerOp) TaskSpecOp { } } -// TaskContainerTemplate adds the deprecated (#977) base container for -// all steps in the task. ContainerTemplate is now StepTemplate. -func TaskContainerTemplate(ops ...ContainerOp) TaskSpecOp { - return func(spec *v1alpha1.TaskSpec) { - base := &corev1.Container{} - - for _, op := range ops { - op(base) - } - spec.ContainerTemplate = base - } -} - // TaskVolume adds a volume with specified name to the TaskSpec. // Any number of Volume modifier can be passed to transform it. func TaskVolume(name string, ops ...VolumeOp) TaskSpecOp { @@ -249,29 +235,15 @@ func OutputsResource(name string, resourceType v1alpha1.PipelineResourceType) Ou } } -// InputsParam adds a param, with specified name, to the Inputs. -// Any number of ParamSpec modifier can be passed to transform it. -func InputsParam(name string, ops ...TaskParamOp) InputsOp { +// InputsParamSpec adds a ParamSpec, with specified name and type, to the Inputs. +// Any number of TaskParamSpec modifier can be passed to transform it. 
+func InputsParamSpec(name string, pt v1alpha1.ParamType, ops ...ParamSpecOp) InputsOp { return func(i *v1alpha1.Inputs) { - tp := &v1alpha1.ParamSpec{Name: name} + ps := &v1alpha1.ParamSpec{Name: name, Type: pt} for _, op := range ops { - op(tp) + op(ps) } - i.Params = append(i.Params, *tp) - } -} - -// ParamDescripiton sets the description to the ParamSpec. -func ParamDescription(desc string) TaskParamOp { - return func(tp *v1alpha1.ParamSpec) { - tp.Description = desc - } -} - -// ParamDefault sets the default value to the ParamSpec. -func ParamDefault(value string) TaskParamOp { - return func(tp *v1alpha1.ParamSpec) { - tp.Default = value + i.Params = append(i.Params, *ps) } } @@ -311,8 +283,8 @@ func PodName(name string) TaskRunStatusOp { } } -// Condition adds a Condition to the TaskRunStatus. -func Condition(condition apis.Condition) TaskRunStatusOp { +// StatusCondition adds a StatusCondition to the TaskRunStatus. +func StatusCondition(condition apis.Condition) TaskRunStatusOp { return func(s *v1alpha1.TaskRunStatus) { s.Conditions = append(s.Conditions, condition) } @@ -509,11 +481,12 @@ func TaskRunInputs(ops ...TaskRunInputsOp) TaskRunSpecOp { } // TaskRunInputsParam add a param, with specified name and value, to the TaskRunInputs. -func TaskRunInputsParam(name, value string) TaskRunInputsOp { +func TaskRunInputsParam(name, value string, additionalValues ...string) TaskRunInputsOp { + arrayOrString := ArrayOrString(value, additionalValues...) 
return func(i *v1alpha1.TaskRunInputs) { i.Params = append(i.Params, v1alpha1.Param{ Name: name, - Value: value, + Value: *arrayOrString, }) } } diff --git a/vendor/github.com/tektoncd/pipeline/test/clients.go b/vendor/github.com/tektoncd/pipeline/test/clients.go index b88fbf40f..1e338f68f 100644 --- a/vendor/github.com/tektoncd/pipeline/test/clients.go +++ b/vendor/github.com/tektoncd/pipeline/test/clients.go @@ -1,9 +1,12 @@ /* -Copyright 2018 Knative Authors LLC +Copyright 2019 The Tekton Authors + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -38,9 +41,9 @@ package test import ( "testing" - knativetest "github.com/knative/pkg/test" "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1" + knativetest "knative.dev/pkg/test" ) // clients holds instances of interfaces for making requests to the Pipeline controllers. 
@@ -52,6 +55,7 @@ type clients struct { TaskRunClient v1alpha1.TaskRunInterface PipelineRunClient v1alpha1.PipelineRunInterface PipelineResourceClient v1alpha1.PipelineResourceInterface + ConditionClient v1alpha1.ConditionInterface } // newClients instantiates and returns several clientsets required for making requests to the @@ -81,5 +85,6 @@ func newClients(t *testing.T, configPath, clusterName, namespace string) *client c.TaskRunClient = cs.TektonV1alpha1().TaskRuns(namespace) c.PipelineRunClient = cs.TektonV1alpha1().PipelineRuns(namespace) c.PipelineResourceClient = cs.TektonV1alpha1().PipelineResources(namespace) + c.ConditionClient = cs.TektonV1alpha1().Conditions(namespace) return c } diff --git a/vendor/github.com/tektoncd/pipeline/test/controller.go b/vendor/github.com/tektoncd/pipeline/test/controller.go index a57de94a4..e8b28d77f 100644 --- a/vendor/github.com/tektoncd/pipeline/test/controller.go +++ b/vendor/github.com/tektoncd/pipeline/test/controller.go @@ -1,9 +1,12 @@ /* Copyright 2019 The Tekton Authors + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -14,19 +17,29 @@ limitations under the License. 
package test import ( + "context" "testing" - "github.com/knative/pkg/controller" + // Link in the fakes so they get injected into injection.Fake + fakepipelineclient "github.com/tektoncd/pipeline/pkg/client/injection/client/fake" + fakeclustertaskinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/clustertask/fake" + fakeconditioninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/condition/fake" + fakepipelineinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipeline/fake" + fakeresourceinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipelineresource/fake" + fakepipelineruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipelinerun/fake" + faketaskinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/task/fake" + faketaskruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/taskrun/fake" + fakekubeclient "knative.dev/pkg/injection/clients/kubeclient/fake" + fakepodinformer "knative.dev/pkg/injection/informers/kubeinformers/corev1/pod/fake" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" fakepipelineclientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake" - informers "github.com/tektoncd/pipeline/pkg/client/informers/externalversions" informersv1alpha1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1" "go.uber.org/zap/zaptest/observer" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - kubeinformers "k8s.io/client-go/informers" coreinformers "k8s.io/client-go/informers/core/v1" fakekubeclientset "k8s.io/client-go/kubernetes/fake" + "knative.dev/pkg/controller" ) // GetLogMessages returns a list of all string logs in logs. 
@@ -47,6 +60,7 @@ type Data struct { Tasks []*v1alpha1.Task ClusterTasks []*v1alpha1.ClusterTask PipelineResources []*v1alpha1.PipelineResource + Conditions []*v1alpha1.Condition Pods []*corev1.Pod Namespaces []*corev1.Namespace } @@ -65,6 +79,7 @@ type Informers struct { Task informersv1alpha1.TaskInformer ClusterTask informersv1alpha1.ClusterTaskInformer PipelineResource informersv1alpha1.PipelineResourceInformer + Condition informersv1alpha1.ConditionInformer Pod coreinformers.PodInformer } @@ -73,90 +88,97 @@ type TestAssets struct { Controller *controller.Impl Logs *observer.ObservedLogs Clients Clients - Informers Informers } // SeedTestData returns Clients and Informers populated with the // given Data. -func SeedTestData(t *testing.T, d Data) (Clients, Informers) { - objs := []runtime.Object{} - for _, r := range d.PipelineResources { - objs = append(objs, r) - } - for _, p := range d.Pipelines { - objs = append(objs, p) - } - for _, pr := range d.PipelineRuns { - objs = append(objs, pr) - } - for _, t := range d.Tasks { - objs = append(objs, t) - } - for _, ct := range d.ClusterTasks { - objs = append(objs, ct) - } - for _, tr := range d.TaskRuns { - objs = append(objs, tr) - } - - kubeObjs := []runtime.Object{} - for _, p := range d.Pods { - kubeObjs = append(kubeObjs, p) - } - for _, n := range d.Namespaces { - kubeObjs = append(kubeObjs, n) - } +func SeedTestData(t *testing.T, ctx context.Context, d Data) (Clients, Informers) { c := Clients{ - Pipeline: fakepipelineclientset.NewSimpleClientset(objs...), - Kube: fakekubeclientset.NewSimpleClientset(kubeObjs...), + Kube: fakekubeclient.Get(ctx), + Pipeline: fakepipelineclient.Get(ctx), } - sharedInformer := informers.NewSharedInformerFactory(c.Pipeline, 0) - kubeInformer := kubeinformers.NewSharedInformerFactory(c.Kube, 0) i := Informers{ - PipelineRun: sharedInformer.Tekton().V1alpha1().PipelineRuns(), - Pipeline: sharedInformer.Tekton().V1alpha1().Pipelines(), - TaskRun: 
sharedInformer.Tekton().V1alpha1().TaskRuns(), - Task: sharedInformer.Tekton().V1alpha1().Tasks(), - ClusterTask: sharedInformer.Tekton().V1alpha1().ClusterTasks(), - PipelineResource: sharedInformer.Tekton().V1alpha1().PipelineResources(), - Pod: kubeInformer.Core().V1().Pods(), + PipelineRun: fakepipelineruninformer.Get(ctx), + Pipeline: fakepipelineinformer.Get(ctx), + TaskRun: faketaskruninformer.Get(ctx), + Task: faketaskinformer.Get(ctx), + ClusterTask: fakeclustertaskinformer.Get(ctx), + PipelineResource: fakeresourceinformer.Get(ctx), + Condition: fakeconditioninformer.Get(ctx), + Pod: fakepodinformer.Get(ctx), } for _, pr := range d.PipelineRuns { if err := i.PipelineRun.Informer().GetIndexer().Add(pr); err != nil { t.Fatal(err) } + if _, err := c.Pipeline.TektonV1alpha1().PipelineRuns(pr.Namespace).Create(pr); err != nil { + t.Fatal(err) + } } for _, p := range d.Pipelines { if err := i.Pipeline.Informer().GetIndexer().Add(p); err != nil { t.Fatal(err) } + if _, err := c.Pipeline.TektonV1alpha1().Pipelines(p.Namespace).Create(p); err != nil { + t.Fatal(err) + } } for _, tr := range d.TaskRuns { if err := i.TaskRun.Informer().GetIndexer().Add(tr); err != nil { t.Fatal(err) } + if _, err := c.Pipeline.TektonV1alpha1().TaskRuns(tr.Namespace).Create(tr); err != nil { + t.Fatal(err) + } } for _, ta := range d.Tasks { if err := i.Task.Informer().GetIndexer().Add(ta); err != nil { t.Fatal(err) } + if _, err := c.Pipeline.TektonV1alpha1().Tasks(ta.Namespace).Create(ta); err != nil { + t.Fatal(err) + } } for _, ct := range d.ClusterTasks { if err := i.ClusterTask.Informer().GetIndexer().Add(ct); err != nil { t.Fatal(err) } + if _, err := c.Pipeline.TektonV1alpha1().ClusterTasks().Create(ct); err != nil { + t.Fatal(err) + } } for _, r := range d.PipelineResources { if err := i.PipelineResource.Informer().GetIndexer().Add(r); err != nil { t.Fatal(err) } + if _, err := c.Pipeline.TektonV1alpha1().PipelineResources(r.Namespace).Create(r); err != nil { + t.Fatal(err) + 
} + } + for _, cond := range d.Conditions { + if err := i.Condition.Informer().GetIndexer().Add(cond); err != nil { + t.Fatal(err) + } + if _, err := c.Pipeline.TektonV1alpha1().Conditions(cond.Namespace).Create(cond); err != nil { + t.Fatal(err) + } } for _, p := range d.Pods { if err := i.Pod.Informer().GetIndexer().Add(p); err != nil { t.Fatal(err) } + if _, err := c.Kube.CoreV1().Pods(p.Namespace).Create(p); err != nil { + t.Fatal(err) + } + } + for _, n := range d.Namespaces { + if _, err := c.Kube.CoreV1().Namespaces().Create(n); err != nil { + t.Fatal(err) + } } + c.Pipeline.ClearActions() + c.Kube.ClearActions() return c, i } diff --git a/vendor/github.com/tektoncd/pipeline/test/e2e-common.sh b/vendor/github.com/tektoncd/pipeline/test/e2e-common.sh index d503f385f..1dec12d9d 100644 --- a/vendor/github.com/tektoncd/pipeline/test/e2e-common.sh +++ b/vendor/github.com/tektoncd/pipeline/test/e2e-common.sh @@ -123,7 +123,7 @@ function install_pipeline_crd() { ko apply -f config/ || fail_test "Build pipeline installation failed" # Make sure thateveything is cleaned up in the current namespace. - for res in pipelineresources tasks pipelines taskruns pipelineruns; do + for res in conditions pipelineresources tasks pipelines taskruns pipelineruns; do kubectl delete --ignore-not-found=true ${res}.tekton.dev --all done diff --git a/vendor/github.com/tektoncd/pipeline/test/secret.go b/vendor/github.com/tektoncd/pipeline/test/secret.go index a0089fc85..63985c903 100644 --- a/vendor/github.com/tektoncd/pipeline/test/secret.go +++ b/vendor/github.com/tektoncd/pipeline/test/secret.go @@ -1,11 +1,14 @@ // +build e2e /* -Copyright 2019 Tekton Authors LLC +Copyright 2019 The Tekton Authors + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -20,10 +23,10 @@ import ( "os" "testing" - knativetest "github.com/knative/pkg/test" "golang.org/x/xerrors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + knativetest "knative.dev/pkg/test" ) // CreateGCPServiceAccountSecret will create a kube secret called secretName in namespace diff --git a/vendor/github.com/tektoncd/pipeline/test/wait.go b/vendor/github.com/tektoncd/pipeline/test/wait.go index 4c385e593..5eb6935c0 100644 --- a/vendor/github.com/tektoncd/pipeline/test/wait.go +++ b/vendor/github.com/tektoncd/pipeline/test/wait.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors. +Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -48,13 +48,13 @@ import ( "fmt" "time" - "github.com/knative/pkg/apis" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" "go.opencensus.io/trace" "golang.org/x/xerrors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" + "knative.dev/pkg/apis" ) const ( diff --git a/vendor/go.opencensus.io/Makefile b/vendor/go.opencensus.io/Makefile index e2f2ed59e..457866cb1 100644 --- a/vendor/go.opencensus.io/Makefile +++ b/vendor/go.opencensus.io/Makefile @@ -15,6 +15,7 @@ EMBEDMD=embedmd # TODO decide if we need to change these names. TRACE_ID_LINT_EXCEPTION="type name will be used as trace.TraceID by other packages" TRACE_OPTION_LINT_EXCEPTION="type name will be used as trace.TraceOptions by other packages" +README_FILES := $(shell find . 
-name '*README.md' | sort | tr '\n' ' ') .DEFAULT_GOAL := fmt-lint-vet-embedmd-test @@ -79,7 +80,7 @@ vet: .PHONY: embedmd embedmd: - @EMBEDMDOUT=`$(EMBEDMD) -d README.md 2>&1`; \ + @EMBEDMDOUT=`$(EMBEDMD) -d $(README_FILES) 2>&1`; \ if [ "$$EMBEDMDOUT" ]; then \ echo "$(EMBEDMD) FAILED => embedmd the following files:\n"; \ echo "$$EMBEDMDOUT\n"; \ diff --git a/vendor/go.opencensus.io/exemplar/exemplar.go b/vendor/go.opencensus.io/exemplar/exemplar.go deleted file mode 100644 index acc225af9..000000000 --- a/vendor/go.opencensus.io/exemplar/exemplar.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package exemplar implements support for exemplars. Exemplars are additional -// data associated with each measurement. -// -// Their purpose it to provide an example of the kind of thing -// (request, RPC, trace span, etc.) that resulted in that measurement. -package exemplar - -import ( - "context" - "time" -) - -// Exemplars keys. -const ( - KeyTraceID = "trace_id" - KeySpanID = "span_id" - KeyPrefixTag = "tag:" -) - -// Exemplar is an example data point associated with each bucket of a -// distribution type aggregation. -type Exemplar struct { - Value float64 // the value that was recorded - Timestamp time.Time // the time the value was recorded - Attachments Attachments // attachments (if any) -} - -// Attachments is a map of extra values associated with a recorded data point. 
-// The map should only be mutated from AttachmentExtractor functions. -type Attachments map[string]string - -// AttachmentExtractor is a function capable of extracting exemplar attachments -// from the context used to record measurements. -// The map passed to the function should be mutated and returned. It will -// initially be nil: the first AttachmentExtractor that would like to add keys to the -// map is responsible for initializing it. -type AttachmentExtractor func(ctx context.Context, a Attachments) Attachments - -var extractors []AttachmentExtractor - -// RegisterAttachmentExtractor registers the given extractor associated with the exemplar -// type name. -// -// Extractors will be used to attempt to extract exemplars from the context -// associated with each recorded measurement. -// -// Packages that support exemplars should register their extractor functions on -// initialization. -// -// RegisterAttachmentExtractor should not be called after any measurements have -// been recorded. -func RegisterAttachmentExtractor(e AttachmentExtractor) { - extractors = append(extractors, e) -} - -// AttachmentsFromContext extracts exemplars from the given context. -// Each registered AttachmentExtractor (see RegisterAttachmentExtractor) is called in an -// unspecified order to add attachments to the exemplar. 
-func AttachmentsFromContext(ctx context.Context) Attachments { - var a Attachments - for _, extractor := range extractors { - a = extractor(ctx, a) - } - return a -} diff --git a/vendor/go.opencensus.io/go.mod b/vendor/go.opencensus.io/go.mod index b59bf6c13..8b7d38e91 100644 --- a/vendor/go.opencensus.io/go.mod +++ b/vendor/go.opencensus.io/go.mod @@ -1,13 +1,10 @@ module go.opencensus.io require ( - github.com/apache/thrift v0.12.0 github.com/golang/protobuf v1.2.0 github.com/google/go-cmp v0.2.0 github.com/hashicorp/golang-lru v0.5.0 - github.com/openzipkin/zipkin-go v0.1.6 - github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 golang.org/x/net v0.0.0-20190311183353-d8887717615a - google.golang.org/api v0.2.0 + google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 // indirect google.golang.org/grpc v1.19.0 ) diff --git a/vendor/go.opencensus.io/go.sum b/vendor/go.opencensus.io/go.sum index 300602f17..cbb37036d 100644 --- a/vendor/go.opencensus.io/go.sum +++ b/vendor/go.opencensus.io/go.sum @@ -1,150 +1,50 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= -github.com/apache/thrift v0.12.0/go.mod 
h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf 
v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/grpc-ecosystem/grpc-gateway v1.6.2/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= 
-github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/openzipkin/zipkin-go v0.1.3 h1:36hTtUTQR/vPX7YVJo2PYexSbHdAJiAkDrjuXw/YlYQ= -github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= -github.com/openzipkin/zipkin-go v0.1.6 h1:yXiysv1CSK7Q5yjGy1710zZGnsbMUIjluWBxtLXHPBo= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f h1:BVwpUVJDADN2ufcGik7W992pyps0wZ888b/y9GXcLTU= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod 
h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ7X0A1AwNEK7CRkVK3YwfOU/QAL4WGg= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net 
v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3 h1:ulvT7fqt0yHWzpJwI57MezWnYDVpCAYBVuYst/L+fAY= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181218192612-074acd46bca6 h1:MXtOG7w2ND9qNCUZSDBGll/SpVIq7ftozR9I8/JGBHY= -golang.org/x/sys v0.0.0-20181218192612-074acd46bca6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181219222714-6e267b5cc78e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -google.golang.org/api v0.0.0-20181220000619-583d854617af h1:iQMS7JKv/0w/iiWf1M49Cg3dmOkBoBZT5KheqPDpaac= -google.golang.org/api v0.0.0-20181220000619-583d854617af/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= 
-google.golang.org/api v0.2.0 h1:B5VXkdjt7K2Gm6fGBC9C9a1OAKJDT95cTqwet+2zib0= -google.golang.org/api v0.2.0/go.mod h1:IfRCZScioGtypHNTlz3gFk67J8uePVW7uDTBzXuIkhU= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181219182458-5a97ab628bfb h1:dQshZyyJ5W/Xk8myF4GKBak1pZW6EywJuQ8+44EQhGA= -google.golang.org/genproto v0.0.0-20181219182458-5a97ab628bfb/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= +google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/go.opencensus.io/metric/metricdata/doc.go b/vendor/go.opencensus.io/metric/metricdata/doc.go new file mode 100644 index 000000000..52a7b3bf8 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/doc.go @@ -0,0 +1,19 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package metricdata contains the metrics data model. +// +// This is an EXPERIMENTAL package, and may change in arbitrary ways without +// notice. 
+package metricdata // import "go.opencensus.io/metric/metricdata" diff --git a/vendor/go.opencensus.io/metric/metricdata/exemplar.go b/vendor/go.opencensus.io/metric/metricdata/exemplar.go new file mode 100644 index 000000000..12695ce2d --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/exemplar.go @@ -0,0 +1,38 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +import ( + "time" +) + +// Exemplars keys. +const ( + AttachmentKeySpanContext = "SpanContext" +) + +// Exemplar is an example data point associated with each bucket of a +// distribution type aggregation. +// +// Their purpose is to provide an example of the kind of thing +// (request, RPC, trace span, etc.) that resulted in that measurement. +type Exemplar struct { + Value float64 // the value that was recorded + Timestamp time.Time // the time the value was recorded + Attachments Attachments // attachments (if any) +} + +// Attachments is a map of extra values associated with a recorded data point. 
+type Attachments map[string]interface{} diff --git a/vendor/go.opencensus.io/metric/metricdata/label.go b/vendor/go.opencensus.io/metric/metricdata/label.go new file mode 100644 index 000000000..aadae41e6 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/label.go @@ -0,0 +1,35 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +// LabelKey represents key of a label. It has optional +// description attribute. +type LabelKey struct { + Key string + Description string +} + +// LabelValue represents the value of a label. +// The zero value represents a missing label value, which may be treated +// differently to an empty string value by some back ends. +type LabelValue struct { + Value string // string value of the label + Present bool // flag that indicated whether a value is present or not +} + +// NewLabelValue creates a new non-nil LabelValue that represents the given string. 
+func NewLabelValue(val string) LabelValue { + return LabelValue{Value: val, Present: true} +} diff --git a/vendor/go.opencensus.io/metric/metricdata/metric.go b/vendor/go.opencensus.io/metric/metricdata/metric.go new file mode 100644 index 000000000..8293712c7 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/metric.go @@ -0,0 +1,46 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +import ( + "time" + + "go.opencensus.io/resource" +) + +// Descriptor holds metadata about a metric. +type Descriptor struct { + Name string // full name of the metric + Description string // human-readable description + Unit Unit // units for the measure + Type Type // type of measure + LabelKeys []LabelKey // label keys +} + +// Metric represents a quantity measured against a resource with different +// label value combinations. +type Metric struct { + Descriptor Descriptor // metric descriptor + Resource *resource.Resource // resource against which this was measured + TimeSeries []*TimeSeries // one time series for each combination of label values +} + +// TimeSeries is a sequence of points associated with a combination of label +// values. 
+type TimeSeries struct { + LabelValues []LabelValue // label values, same order as keys in the metric descriptor + Points []Point // points sequence + StartTime time.Time // time we started recording this time series +} diff --git a/vendor/go.opencensus.io/metric/metricdata/point.go b/vendor/go.opencensus.io/metric/metricdata/point.go new file mode 100644 index 000000000..7fe057b19 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/point.go @@ -0,0 +1,193 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +import ( + "time" +) + +// Point is a single data point of a time series. +type Point struct { + // Time is the point in time that this point represents in a time series. + Time time.Time + // Value is the value of this point. Prefer using ReadValue to switching on + // the value type, since new value types might be added. + Value interface{} +} + +//go:generate stringer -type ValueType + +// NewFloat64Point creates a new Point holding a float64 value. +func NewFloat64Point(t time.Time, val float64) Point { + return Point{ + Value: val, + Time: t, + } +} + +// NewInt64Point creates a new Point holding an int64 value. +func NewInt64Point(t time.Time, val int64) Point { + return Point{ + Value: val, + Time: t, + } +} + +// NewDistributionPoint creates a new Point holding a Distribution value. 
+func NewDistributionPoint(t time.Time, val *Distribution) Point { + return Point{ + Value: val, + Time: t, + } +} + +// NewSummaryPoint creates a new Point holding a Summary value. +func NewSummaryPoint(t time.Time, val *Summary) Point { + return Point{ + Value: val, + Time: t, + } +} + +// ValueVisitor allows reading the value of a point. +type ValueVisitor interface { + VisitFloat64Value(float64) + VisitInt64Value(int64) + VisitDistributionValue(*Distribution) + VisitSummaryValue(*Summary) +} + +// ReadValue accepts a ValueVisitor and calls the appropriate method with the +// value of this point. +// Consumers of Point should use this in preference to switching on the type +// of the value directly, since new value types may be added. +func (p Point) ReadValue(vv ValueVisitor) { + switch v := p.Value.(type) { + case int64: + vv.VisitInt64Value(v) + case float64: + vv.VisitFloat64Value(v) + case *Distribution: + vv.VisitDistributionValue(v) + case *Summary: + vv.VisitSummaryValue(v) + default: + panic("unexpected value type") + } +} + +// Distribution contains summary statistics for a population of values. It +// optionally contains a histogram representing the distribution of those +// values across a set of buckets. +type Distribution struct { + // Count is the number of values in the population. Must be non-negative. This value + // must equal the sum of the values in bucket_counts if a histogram is + // provided. + Count int64 + // Sum is the sum of the values in the population. If count is zero then this field + // must be zero. + Sum float64 + // SumOfSquaredDeviation is the sum of squared deviations from the mean of the values in the + // population. For values x_i this is: + // + // Sum[i=1..n]((x_i - mean)^2) + // + // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition + // describes Welford's method for accumulating this sum in one pass. + // + // If count is zero then this field must be zero. 
+ SumOfSquaredDeviation float64 + // BucketOptions describes the bounds of the histogram buckets in this + // distribution. + // + // A Distribution may optionally contain a histogram of the values in the + // population. + // + // If nil, there is no associated histogram. + BucketOptions *BucketOptions + // Bucket If the distribution does not have a histogram, then omit this field. + // If there is a histogram, then the sum of the values in the Bucket counts + // must equal the value in the count field of the distribution. + Buckets []Bucket +} + +// BucketOptions describes the bounds of the histogram buckets in this +// distribution. +type BucketOptions struct { + // Bounds specifies a set of bucket upper bounds. + // This defines len(bounds) + 1 (= N) buckets. The boundaries for bucket + // index i are: + // + // [0, Bounds[i]) for i == 0 + // [Bounds[i-1], Bounds[i]) for 0 < i < N-1 + // [Bounds[i-1], +infinity) for i == N-1 + Bounds []float64 +} + +// Bucket represents a single bucket (value range) in a distribution. +type Bucket struct { + // Count is the number of values in each bucket of the histogram, as described in + // bucket_bounds. + Count int64 + // Exemplar associated with this bucket (if any). + Exemplar *Exemplar +} + +// Summary is a representation of percentiles. +type Summary struct { + // Count is the cumulative count (if available). + Count int64 + // Sum is the cumulative sum of values (if available). + Sum float64 + // HasCountAndSum is true if Count and Sum are available. + HasCountAndSum bool + // Snapshot represents percentiles calculated over an arbitrary time window. + // The values in this struct can be reset at arbitrary unknown times, with + // the requirement that all of them are reset at the same time. + Snapshot Snapshot +} + +// Snapshot represents percentiles over an arbitrary time. +// The values in this struct can be reset at arbitrary unknown times, with +// the requirement that all of them are reset at the same time. 
+type Snapshot struct { + // Count is the number of values in the snapshot. Optional since some systems don't + // expose this. Set to 0 if not available. + Count int64 + // Sum is the sum of values in the snapshot. Optional since some systems don't + // expose this. If count is 0 then this field must be zero. + Sum float64 + // Percentiles is a map from percentile (range (0-100.0]) to the value of + // the percentile. + Percentiles map[float64]float64 +} + +//go:generate stringer -type Type + +// Type is the overall type of metric, including its value type and whether it +// represents a cumulative total (since the start time) or if it represents a +// gauge value. +type Type int + +// Metric types. +const ( + TypeGaugeInt64 Type = iota + TypeGaugeFloat64 + TypeGaugeDistribution + TypeCumulativeInt64 + TypeCumulativeFloat64 + TypeCumulativeDistribution + TypeSummary +) diff --git a/vendor/go.opencensus.io/metric/metricdata/type_string.go b/vendor/go.opencensus.io/metric/metricdata/type_string.go new file mode 100644 index 000000000..c3f8ec27b --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/type_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type Type"; DO NOT EDIT. 
+ +package metricdata + +import "strconv" + +const _Type_name = "TypeGaugeInt64TypeGaugeFloat64TypeGaugeDistributionTypeCumulativeInt64TypeCumulativeFloat64TypeCumulativeDistributionTypeSummary" + +var _Type_index = [...]uint8{0, 14, 30, 51, 70, 91, 117, 128} + +func (i Type) String() string { + if i < 0 || i >= Type(len(_Type_index)-1) { + return "Type(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Type_name[_Type_index[i]:_Type_index[i+1]] +} diff --git a/vendor/go.opencensus.io/trace/exemplar.go b/vendor/go.opencensus.io/metric/metricdata/unit.go similarity index 52% rename from vendor/go.opencensus.io/trace/exemplar.go rename to vendor/go.opencensus.io/metric/metricdata/unit.go index 416d80590..b483a1371 100644 --- a/vendor/go.opencensus.io/trace/exemplar.go +++ b/vendor/go.opencensus.io/metric/metricdata/unit.go @@ -12,32 +12,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -package trace +package metricdata -import ( - "context" - "encoding/hex" +// Unit is a string encoded according to the case-sensitive abbreviations from the +// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html +type Unit string - "go.opencensus.io/exemplar" +// Predefined units. To record against a unit not represented here, create your +// own Unit type constant from a string. 
+const ( + UnitDimensionless Unit = "1" + UnitBytes Unit = "By" + UnitMilliseconds Unit = "ms" ) - -func init() { - exemplar.RegisterAttachmentExtractor(attachSpanContext) -} - -func attachSpanContext(ctx context.Context, a exemplar.Attachments) exemplar.Attachments { - span := FromContext(ctx) - if span == nil { - return a - } - sc := span.SpanContext() - if !sc.IsSampled() { - return a - } - if a == nil { - a = make(exemplar.Attachments) - } - a[exemplar.KeyTraceID] = hex.EncodeToString(sc.TraceID[:]) - a[exemplar.KeySpanID] = hex.EncodeToString(sc.SpanID[:]) - return a -} diff --git a/vendor/go.opencensus.io/metric/metricexport/doc.go b/vendor/go.opencensus.io/metric/metricexport/doc.go new file mode 100644 index 000000000..df632a792 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricexport/doc.go @@ -0,0 +1,19 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package metricexport contains support for exporting metric data. +// +// This is an EXPERIMENTAL package, and may change in arbitrary ways without +// notice. 
+package metricexport // import "go.opencensus.io/metric/metricexport" diff --git a/vendor/go.opencensus.io/metric/metricexport/export.go b/vendor/go.opencensus.io/metric/metricexport/export.go new file mode 100644 index 000000000..23f4a864a --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricexport/export.go @@ -0,0 +1,26 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricexport + +import ( + "context" + + "go.opencensus.io/metric/metricdata" +) + +// Exporter is an interface that exporters implement to export the metric data. +type Exporter interface { + ExportMetrics(ctx context.Context, data []*metricdata.Metric) error +} diff --git a/vendor/go.opencensus.io/metric/metricexport/reader.go b/vendor/go.opencensus.io/metric/metricexport/reader.go new file mode 100644 index 000000000..44ace7008 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricexport/reader.go @@ -0,0 +1,187 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// + +package metricexport + +import ( + "fmt" + "time" + + "context" + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/metric/metricproducer" + "go.opencensus.io/trace" + "sync" +) + +var ( + defaultSampler = trace.ProbabilitySampler(0.0001) + errReportingIntervalTooLow = fmt.Errorf("reporting interval less than %d", minimumReportingDuration) + errAlreadyStarted = fmt.Errorf("already started") + errIntervalReaderNil = fmt.Errorf("interval reader is nil") + errExporterNil = fmt.Errorf("exporter is nil") + errReaderNil = fmt.Errorf("reader is nil") +) + +const ( + defaultReportingDuration = 60 * time.Second + minimumReportingDuration = 1 * time.Second + defaultSpanName = "ExportMetrics" +) + +// ReaderOptions contains options pertaining to metrics reader. +type ReaderOptions struct { + // SpanName is the name used for span created to export metrics. + SpanName string +} + +// Reader reads metrics from all producers registered +// with producer manager and exports those metrics using provided +// exporter. +type Reader struct { + sampler trace.Sampler + + spanName string +} + +// IntervalReader periodically reads metrics from all producers registered +// with producer manager and exports those metrics using provided +// exporter. Call Reader.Stop() to stop the reader. +type IntervalReader struct { + // ReportingInterval it the time duration between two consecutive + // metrics reporting. defaultReportingDuration is used if it is not set. + // It cannot be set lower than minimumReportingDuration. + ReportingInterval time.Duration + + exporter Exporter + timer *time.Ticker + quit, done chan bool + mu sync.RWMutex + reader *Reader +} + +// ReaderOption apply changes to ReaderOptions. +type ReaderOption func(*ReaderOptions) + +// WithSpanName makes new reader to use given span name when exporting metrics. 
+func WithSpanName(spanName string) ReaderOption { + return func(o *ReaderOptions) { + o.SpanName = spanName + } +} + +// NewReader returns a reader configured with specified options. +func NewReader(o ...ReaderOption) *Reader { + var opts ReaderOptions + for _, op := range o { + op(&opts) + } + reader := &Reader{defaultSampler, defaultSpanName} + if opts.SpanName != "" { + reader.spanName = opts.SpanName + } + return reader +} + +// NewIntervalReader creates a reader. Once started it periodically +// reads metrics from all producers and exports them using provided exporter. +func NewIntervalReader(reader *Reader, exporter Exporter) (*IntervalReader, error) { + if exporter == nil { + return nil, errExporterNil + } + if reader == nil { + return nil, errReaderNil + } + + r := &IntervalReader{ + exporter: exporter, + reader: reader, + } + return r, nil +} + +// Start starts the IntervalReader which periodically reads metrics from all +// producers registered with global producer manager. If the reporting interval +// is not set prior to calling this function then default reporting interval +// is used. +func (ir *IntervalReader) Start() error { + if ir == nil { + return errIntervalReaderNil + } + ir.mu.Lock() + defer ir.mu.Unlock() + var reportingInterval = defaultReportingDuration + if ir.ReportingInterval != 0 { + if ir.ReportingInterval < minimumReportingDuration { + return errReportingIntervalTooLow + } + reportingInterval = ir.ReportingInterval + } + + if ir.done != nil { + return errAlreadyStarted + } + ir.timer = time.NewTicker(reportingInterval) + ir.quit = make(chan bool) + ir.done = make(chan bool) + + go ir.startInternal() + return nil +} + +func (ir *IntervalReader) startInternal() { + for { + select { + case <-ir.timer.C: + ir.reader.ReadAndExport(ir.exporter) + case <-ir.quit: + ir.timer.Stop() + ir.done <- true + return + } + } +} + +// Stop stops the reader from reading and exporting metrics. +// Additional call to Stop are no-ops. 
+func (ir *IntervalReader) Stop() { + if ir == nil { + return + } + ir.mu.Lock() + defer ir.mu.Unlock() + if ir.quit == nil { + return + } + ir.quit <- true + <-ir.done + close(ir.quit) + close(ir.done) + ir.quit = nil +} + +// ReadAndExport reads metrics from all producer registered with +// producer manager and then exports them using provided exporter. +func (r *Reader) ReadAndExport(exporter Exporter) { + ctx, span := trace.StartSpan(context.Background(), r.spanName, trace.WithSampler(r.sampler)) + defer span.End() + producers := metricproducer.GlobalManager().GetAll() + data := []*metricdata.Metric{} + for _, producer := range producers { + data = append(data, producer.Read()...) + } + // TODO: [rghetia] add metrics for errors. + exporter.ExportMetrics(ctx, data) +} diff --git a/vendor/go.opencensus.io/metric/metricproducer/manager.go b/vendor/go.opencensus.io/metric/metricproducer/manager.go new file mode 100644 index 000000000..ca1f39049 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricproducer/manager.go @@ -0,0 +1,78 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricproducer + +import ( + "sync" +) + +// Manager maintains a list of active producers. Producers can register +// with the manager to allow readers to read all metrics provided by them. +// Readers can retrieve all producers registered with the manager, +// read metrics from the producers and export them. 
+type Manager struct { + mu sync.RWMutex + producers map[Producer]struct{} +} + +var prodMgr *Manager +var once sync.Once + +// GlobalManager is a single instance of producer manager +// that is used by all producers and all readers. +func GlobalManager() *Manager { + once.Do(func() { + prodMgr = &Manager{} + prodMgr.producers = make(map[Producer]struct{}) + }) + return prodMgr +} + +// AddProducer adds the producer to the Manager if it is not already present. +func (pm *Manager) AddProducer(producer Producer) { + if producer == nil { + return + } + pm.mu.Lock() + defer pm.mu.Unlock() + pm.producers[producer] = struct{}{} +} + +// DeleteProducer deletes the producer from the Manager if it is present. +func (pm *Manager) DeleteProducer(producer Producer) { + if producer == nil { + return + } + pm.mu.Lock() + defer pm.mu.Unlock() + delete(pm.producers, producer) +} + +// GetAll returns a slice of all producer currently registered with +// the Manager. For each call it generates a new slice. The slice +// should not be cached as registration may change at any time. It is +// typically called periodically by exporter to read metrics from +// the producers. +func (pm *Manager) GetAll() []Producer { + pm.mu.Lock() + defer pm.mu.Unlock() + producers := make([]Producer, len(pm.producers)) + i := 0 + for producer := range pm.producers { + producers[i] = producer + i++ + } + return producers +} diff --git a/vendor/go.opencensus.io/metric/metricproducer/producer.go b/vendor/go.opencensus.io/metric/metricproducer/producer.go new file mode 100644 index 000000000..6cee9ed17 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricproducer/producer.go @@ -0,0 +1,28 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricproducer + +import ( + "go.opencensus.io/metric/metricdata" +) + +// Producer is a source of metrics. +type Producer interface { + // Read should return the current values of all metrics supported by this + // metric provider. + // The returned metrics should be unique for each combination of name and + // resource. + Read() []*metricdata.Metric +} diff --git a/vendor/go.opencensus.io/opencensus.go b/vendor/go.opencensus.io/opencensus.go index a52dcd8c6..d2565f1e2 100644 --- a/vendor/go.opencensus.io/opencensus.go +++ b/vendor/go.opencensus.io/opencensus.go @@ -17,5 +17,5 @@ package opencensus // import "go.opencensus.io" // Version is the current release version of OpenCensus in use. 
func Version() string { - return "0.20.0" + return "0.21.0" } diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go index e9991fe0f..0ae569182 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go @@ -22,9 +22,11 @@ import ( "sync/atomic" "time" + "go.opencensus.io/metric/metricdata" ocstats "go.opencensus.io/stats" "go.opencensus.io/stats/view" "go.opencensus.io/tag" + "go.opencensus.io/trace" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/stats" @@ -141,27 +143,31 @@ func handleRPCEnd(ctx context.Context, s *stats.End) { } latencyMillis := float64(elapsedTime) / float64(time.Millisecond) + attachments := getSpanCtxAttachment(ctx) if s.Client { - ocstats.RecordWithTags(ctx, - []tag.Mutator{ + ocstats.RecordWithOptions(ctx, + ocstats.WithTags( tag.Upsert(KeyClientMethod, methodName(d.method)), - tag.Upsert(KeyClientStatus, st), - }, - ClientSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), - ClientSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)), - ClientReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)), - ClientReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), - ClientRoundtripLatency.M(latencyMillis)) + tag.Upsert(KeyClientStatus, st)), + ocstats.WithAttachments(attachments), + ocstats.WithMeasurements( + ClientSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), + ClientSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)), + ClientReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)), + ClientReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), + ClientRoundtripLatency.M(latencyMillis))) } else { - ocstats.RecordWithTags(ctx, - []tag.Mutator{ + ocstats.RecordWithOptions(ctx, + ocstats.WithTags( tag.Upsert(KeyServerStatus, st), - }, - ServerSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), - ServerSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)), - 
ServerReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)), - ServerReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), - ServerLatency.M(latencyMillis)) + ), + ocstats.WithAttachments(attachments), + ocstats.WithMeasurements( + ServerSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), + ServerSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)), + ServerReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)), + ServerReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), + ServerLatency.M(latencyMillis))) } } @@ -206,3 +212,16 @@ func statusCodeToString(s *status.Status) string { return "CODE_" + strconv.FormatInt(int64(c), 10) } } + +func getSpanCtxAttachment(ctx context.Context) metricdata.Attachments { + attachments := map[string]interface{}{} + span := trace.FromContext(ctx) + if span == nil { + return attachments + } + spanCtx := span.SpanContext() + if spanCtx.IsSampled() { + attachments[metricdata.AttachmentKeySpanContext] = spanCtx + } + return attachments +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/client_stats.go b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go index e258bcc2a..17142aabe 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/client_stats.go +++ b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go @@ -68,7 +68,7 @@ func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) { track.end() } else { track.body = resp.Body - resp.Body = track + resp.Body = wrappedBody(track, resp.Body) } } return resp, err diff --git a/vendor/go.opencensus.io/plugin/ochttp/server.go b/vendor/go.opencensus.io/plugin/ochttp/server.go index 5fe15e89f..4f6404fa7 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/server.go +++ b/vendor/go.opencensus.io/plugin/ochttp/server.go @@ -124,6 +124,12 @@ func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Requ } } span.AddAttributes(requestAttrs(r)...) + if r.Body == nil { + // TODO: Handle cases where ContentLength is not set. 
+ } else if r.ContentLength > 0 { + span.AddMessageReceiveEvent(0, /* TODO: messageID */ + int64(r.ContentLength), -1) + } return r.WithContext(ctx), span.End } @@ -201,6 +207,9 @@ func (t *trackingResponseWriter) Header() http.Header { func (t *trackingResponseWriter) Write(data []byte) (int, error) { n, err := t.writer.Write(data) t.respSize += int64(n) + // Add message event for request bytes sent. + span := trace.FromContext(t.ctx) + span.AddMessageSendEvent(0 /* TODO: messageID */, int64(n), -1) return n, err } diff --git a/vendor/go.opencensus.io/plugin/ochttp/trace.go b/vendor/go.opencensus.io/plugin/ochttp/trace.go index ca312fcf4..c23b97fb1 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/trace.go +++ b/vendor/go.opencensus.io/plugin/ochttp/trace.go @@ -34,6 +34,7 @@ const ( HostAttribute = "http.host" MethodAttribute = "http.method" PathAttribute = "http.path" + URLAttribute = "http.url" UserAgentAttribute = "http.user_agent" StatusCodeAttribute = "http.status_code" ) @@ -93,7 +94,8 @@ func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) { // span.End() will be invoked after // a read from resp.Body returns io.EOF or when // resp.Body.Close() is invoked. 
- resp.Body = &bodyTracker{rc: resp.Body, span: span} + bt := &bodyTracker{rc: resp.Body, span: span} + resp.Body = wrappedBody(bt, resp.Body) return resp, err } @@ -149,12 +151,21 @@ func spanNameFromURL(req *http.Request) string { } func requestAttrs(r *http.Request) []trace.Attribute { - return []trace.Attribute{ + userAgent := r.UserAgent() + + attrs := make([]trace.Attribute, 0, 5) + attrs = append(attrs, trace.StringAttribute(PathAttribute, r.URL.Path), + trace.StringAttribute(URLAttribute, r.URL.String()), trace.StringAttribute(HostAttribute, r.Host), trace.StringAttribute(MethodAttribute, r.Method), - trace.StringAttribute(UserAgentAttribute, r.UserAgent()), + ) + + if userAgent != "" { + attrs = append(attrs, trace.StringAttribute(UserAgentAttribute, userAgent)) } + + return attrs } func responseAttrs(resp *http.Response) []trace.Attribute { diff --git a/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go b/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go new file mode 100644 index 000000000..7d75cae2b --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go @@ -0,0 +1,44 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "io" +) + +// wrappedBody returns a wrapped version of the original +// Body and only implements the same combination of additional +// interfaces as the original. 
+func wrappedBody(wrapper io.ReadCloser, body io.ReadCloser) io.ReadCloser { + var ( + wr, i0 = body.(io.Writer) + ) + switch { + case !i0: + return struct { + io.ReadCloser + }{wrapper} + + case i0: + return struct { + io.ReadCloser + io.Writer + }{wrapper, wr} + default: + return struct { + io.ReadCloser + }{wrapper} + } +} diff --git a/vendor/go.opencensus.io/resource/resource.go b/vendor/go.opencensus.io/resource/resource.go new file mode 100644 index 000000000..b1764e1d3 --- /dev/null +++ b/vendor/go.opencensus.io/resource/resource.go @@ -0,0 +1,164 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package resource provides functionality for resource, which capture +// identifying information about the entities for which signals are exported. +package resource + +import ( + "context" + "fmt" + "os" + "regexp" + "sort" + "strconv" + "strings" +) + +// Environment variables used by FromEnv to decode a resource. +const ( + EnvVarType = "OC_RESOURCE_TYPE" + EnvVarLabels = "OC_RESOURCE_LABELS" +) + +// Resource describes an entity about which identifying information and metadata is exposed. +// For example, a type "k8s.io/container" may hold labels describing the pod name and namespace. +type Resource struct { + Type string + Labels map[string]string +} + +// EncodeLabels encodes a labels map to a string as provided via the OC_RESOURCE_LABELS environment variable. 
+func EncodeLabels(labels map[string]string) string { + sortedKeys := make([]string, 0, len(labels)) + for k := range labels { + sortedKeys = append(sortedKeys, k) + } + sort.Strings(sortedKeys) + + s := "" + for i, k := range sortedKeys { + if i > 0 { + s += "," + } + s += k + "=" + strconv.Quote(labels[k]) + } + return s +} + +var labelRegex = regexp.MustCompile(`^\s*([[:ascii:]]{1,256}?)=("[[:ascii:]]{0,256}?")\s*,`) + +// DecodeLabels decodes a serialized label map as used in the OC_RESOURCE_LABELS variable. +// A list of labels of the form `="",="",...` is accepted. +// Domain names and paths are accepted as label keys. +// Most users will want to use FromEnv instead. +func DecodeLabels(s string) (map[string]string, error) { + m := map[string]string{} + // Ensure a trailing comma, which allows us to keep the regex simpler + s = strings.TrimRight(strings.TrimSpace(s), ",") + "," + + for len(s) > 0 { + match := labelRegex.FindStringSubmatch(s) + if len(match) == 0 { + return nil, fmt.Errorf("invalid label formatting, remainder: %s", s) + } + v := match[2] + if v == "" { + v = match[3] + } else { + var err error + if v, err = strconv.Unquote(v); err != nil { + return nil, fmt.Errorf("invalid label formatting, remainder: %s, err: %s", s, err) + } + } + m[match[1]] = v + + s = s[len(match[0]):] + } + return m, nil +} + +// FromEnv is a detector that loads resource information from the OC_RESOURCE_TYPE +// and OC_RESOURCE_labelS environment variables. +func FromEnv(context.Context) (*Resource, error) { + res := &Resource{ + Type: strings.TrimSpace(os.Getenv(EnvVarType)), + } + labels := strings.TrimSpace(os.Getenv(EnvVarLabels)) + if labels == "" { + return res, nil + } + var err error + if res.Labels, err = DecodeLabels(labels); err != nil { + return nil, err + } + return res, nil +} + +var _ Detector = FromEnv + +// merge resource information from b into a. In case of a collision, a takes precedence. 
+func merge(a, b *Resource) *Resource { + if a == nil { + return b + } + if b == nil { + return a + } + res := &Resource{ + Type: a.Type, + Labels: map[string]string{}, + } + if res.Type == "" { + res.Type = b.Type + } + for k, v := range b.Labels { + res.Labels[k] = v + } + // Labels from resource a overwrite labels from resource b. + for k, v := range a.Labels { + res.Labels[k] = v + } + return res +} + +// Detector attempts to detect resource information. +// If the detector cannot find resource information, the returned resource is nil but no +// error is returned. +// An error is only returned on unexpected failures. +type Detector func(context.Context) (*Resource, error) + +// MultiDetector returns a Detector that calls all input detectors in order and +// merges each result with the previous one. In case a type of label key is already set, +// the first set value is takes precedence. +// It returns on the first error that a sub-detector encounters. +func MultiDetector(detectors ...Detector) Detector { + return func(ctx context.Context) (*Resource, error) { + return detectAll(ctx, detectors...) + } +} + +// detectall calls all input detectors sequentially an merges each result with the previous one. +// It returns on the first error that a sub-detector encounters. +func detectAll(ctx context.Context, detectors ...Detector) (*Resource, error) { + var res *Resource + for _, d := range detectors { + r, err := d(ctx) + if err != nil { + return nil, err + } + res = merge(res, r) + } + return res, nil +} diff --git a/vendor/go.opencensus.io/stats/internal/record.go b/vendor/go.opencensus.io/stats/internal/record.go index ed5455205..36935e629 100644 --- a/vendor/go.opencensus.io/stats/internal/record.go +++ b/vendor/go.opencensus.io/stats/internal/record.go @@ -19,7 +19,7 @@ import ( ) // DefaultRecorder will be called for each Record call. 
-var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]string) +var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]interface{}) // SubscriptionReporter reports when a view subscribed with a measure. var SubscriptionReporter func(measure string) diff --git a/vendor/go.opencensus.io/stats/record.go b/vendor/go.opencensus.io/stats/record.go index 86f491e22..ad4691184 100644 --- a/vendor/go.opencensus.io/stats/record.go +++ b/vendor/go.opencensus.io/stats/record.go @@ -18,7 +18,7 @@ package stats import ( "context" - "go.opencensus.io/exemplar" + "go.opencensus.io/metric/metricdata" "go.opencensus.io/stats/internal" "go.opencensus.io/tag" ) @@ -31,39 +31,87 @@ func init() { } } +type recordOptions struct { + attachments metricdata.Attachments + mutators []tag.Mutator + measurements []Measurement +} + +// WithAttachments applies provided exemplar attachments. +func WithAttachments(attachments metricdata.Attachments) Options { + return func(ro *recordOptions) { + ro.attachments = attachments + } +} + +// WithTags applies provided tag mutators. +func WithTags(mutators ...tag.Mutator) Options { + return func(ro *recordOptions) { + ro.mutators = mutators + } +} + +// WithMeasurements applies provided measurements. +func WithMeasurements(measurements ...Measurement) Options { + return func(ro *recordOptions) { + ro.measurements = measurements + } +} + +// Options apply changes to recordOptions. +type Options func(*recordOptions) + +func createRecordOption(ros ...Options) *recordOptions { + o := &recordOptions{} + for _, ro := range ros { + ro(o) + } + return o +} + // Record records one or multiple measurements with the same context at once. // If there are any tags in the context, measurements will be tagged with them. func Record(ctx context.Context, ms ...Measurement) { + RecordWithOptions(ctx, WithMeasurements(ms...)) +} + +// RecordWithTags records one or multiple measurements at once. 
+// +// Measurements will be tagged with the tags in the context mutated by the mutators. +// RecordWithTags is useful if you want to record with tag mutations but don't want +// to propagate the mutations in the context. +func RecordWithTags(ctx context.Context, mutators []tag.Mutator, ms ...Measurement) error { + return RecordWithOptions(ctx, WithTags(mutators...), WithMeasurements(ms...)) +} + +// RecordWithOptions records measurements from the given options (if any) against context +// and tags and attachments in the options (if any). +// If there are any tags in the context, measurements will be tagged with them. +func RecordWithOptions(ctx context.Context, ros ...Options) error { + o := createRecordOption(ros...) + if len(o.measurements) == 0 { + return nil + } recorder := internal.DefaultRecorder if recorder == nil { - return - } - if len(ms) == 0 { - return + return nil } record := false - for _, m := range ms { + for _, m := range o.measurements { if m.desc.subscribed() { record = true break } } if !record { - return + return nil } - recorder(tag.FromContext(ctx), ms, exemplar.AttachmentsFromContext(ctx)) -} - -// RecordWithTags records one or multiple measurements at once. -// -// Measurements will be tagged with the tags in the context mutated by the mutators. -// RecordWithTags is useful if you want to record with tag mutations but don't want -// to propagate the mutations in the context. -func RecordWithTags(ctx context.Context, mutators []tag.Mutator, ms ...Measurement) error { - ctx, err := tag.New(ctx, mutators...) - if err != nil { - return err + if len(o.mutators) > 0 { + var err error + if ctx, err = tag.New(ctx, o.mutators...); err != nil { + return err + } } - Record(ctx, ms...) 
+ recorder(tag.FromContext(ctx), o.measurements, o.attachments) return nil } diff --git a/vendor/go.opencensus.io/stats/view/aggregation_data.go b/vendor/go.opencensus.io/stats/view/aggregation_data.go index 960b94601..d500e67f7 100644 --- a/vendor/go.opencensus.io/stats/view/aggregation_data.go +++ b/vendor/go.opencensus.io/stats/view/aggregation_data.go @@ -17,8 +17,9 @@ package view import ( "math" + "time" - "go.opencensus.io/exemplar" + "go.opencensus.io/metric/metricdata" ) // AggregationData represents an aggregated value from a collection. @@ -26,9 +27,10 @@ import ( // Mosts users won't directly access aggregration data. type AggregationData interface { isAggregationData() bool - addSample(e *exemplar.Exemplar) + addSample(v float64, attachments map[string]interface{}, t time.Time) clone() AggregationData equal(other AggregationData) bool + toPoint(t metricdata.Type, time time.Time) metricdata.Point } const epsilon = 1e-9 @@ -43,7 +45,7 @@ type CountData struct { func (a *CountData) isAggregationData() bool { return true } -func (a *CountData) addSample(_ *exemplar.Exemplar) { +func (a *CountData) addSample(_ float64, _ map[string]interface{}, _ time.Time) { a.Value = a.Value + 1 } @@ -60,6 +62,15 @@ func (a *CountData) equal(other AggregationData) bool { return a.Value == a2.Value } +func (a *CountData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { + switch metricType { + case metricdata.TypeCumulativeInt64: + return metricdata.NewInt64Point(t, a.Value) + default: + panic("unsupported metricdata.Type") + } +} + // SumData is the aggregated data for the Sum aggregation. // A sum aggregation processes data and sums up the recordings. 
// @@ -70,8 +81,8 @@ type SumData struct { func (a *SumData) isAggregationData() bool { return true } -func (a *SumData) addSample(e *exemplar.Exemplar) { - a.Value += e.Value +func (a *SumData) addSample(v float64, _ map[string]interface{}, _ time.Time) { + a.Value += v } func (a *SumData) clone() AggregationData { @@ -86,6 +97,17 @@ func (a *SumData) equal(other AggregationData) bool { return math.Pow(a.Value-a2.Value, 2) < epsilon } +func (a *SumData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { + switch metricType { + case metricdata.TypeCumulativeInt64: + return metricdata.NewInt64Point(t, int64(a.Value)) + case metricdata.TypeCumulativeFloat64: + return metricdata.NewFloat64Point(t, a.Value) + default: + panic("unsupported metricdata.Type") + } +} + // DistributionData is the aggregated data for the // Distribution aggregation. // @@ -102,7 +124,7 @@ type DistributionData struct { CountPerBucket []int64 // number of occurrences per bucket // ExemplarsPerBucket is slice the same length as CountPerBucket containing // an exemplar for the associated bucket, or nil. - ExemplarsPerBucket []*exemplar.Exemplar + ExemplarsPerBucket []*metricdata.Exemplar bounds []float64 // histogram distribution of the values } @@ -110,7 +132,7 @@ func newDistributionData(bounds []float64) *DistributionData { bucketCount := len(bounds) + 1 return &DistributionData{ CountPerBucket: make([]int64, bucketCount), - ExemplarsPerBucket: make([]*exemplar.Exemplar, bucketCount), + ExemplarsPerBucket: make([]*metricdata.Exemplar, bucketCount), bounds: bounds, Min: math.MaxFloat64, Max: math.SmallestNonzeroFloat64, @@ -129,64 +151,62 @@ func (a *DistributionData) variance() float64 { func (a *DistributionData) isAggregationData() bool { return true } -func (a *DistributionData) addSample(e *exemplar.Exemplar) { - f := e.Value - if f < a.Min { - a.Min = f +// TODO(songy23): support exemplar attachments. 
+func (a *DistributionData) addSample(v float64, attachments map[string]interface{}, t time.Time) { + if v < a.Min { + a.Min = v } - if f > a.Max { - a.Max = f + if v > a.Max { + a.Max = v } a.Count++ - a.addToBucket(e) + a.addToBucket(v, attachments, t) if a.Count == 1 { - a.Mean = f + a.Mean = v return } oldMean := a.Mean - a.Mean = a.Mean + (f-a.Mean)/float64(a.Count) - a.SumOfSquaredDev = a.SumOfSquaredDev + (f-oldMean)*(f-a.Mean) + a.Mean = a.Mean + (v-a.Mean)/float64(a.Count) + a.SumOfSquaredDev = a.SumOfSquaredDev + (v-oldMean)*(v-a.Mean) } -func (a *DistributionData) addToBucket(e *exemplar.Exemplar) { +func (a *DistributionData) addToBucket(v float64, attachments map[string]interface{}, t time.Time) { var count *int64 - var ex **exemplar.Exemplar - for i, b := range a.bounds { - if e.Value < b { + var i int + var b float64 + for i, b = range a.bounds { + if v < b { count = &a.CountPerBucket[i] - ex = &a.ExemplarsPerBucket[i] break } } - if count == nil { - count = &a.CountPerBucket[len(a.bounds)] - ex = &a.ExemplarsPerBucket[len(a.bounds)] + if count == nil { // Last bucket. + i = len(a.bounds) + count = &a.CountPerBucket[i] } *count++ - *ex = maybeRetainExemplar(*ex, e) + if exemplar := getExemplar(v, attachments, t); exemplar != nil { + a.ExemplarsPerBucket[i] = exemplar + } } -func maybeRetainExemplar(old, cur *exemplar.Exemplar) *exemplar.Exemplar { - if old == nil { - return cur +func getExemplar(v float64, attachments map[string]interface{}, t time.Time) *metricdata.Exemplar { + if len(attachments) == 0 { + return nil } - - // Heuristic to pick the "better" exemplar: first keep the one with a - // sampled trace attachment, if neither have a trace attachment, pick the - // one with more attachments. 
- _, haveTraceID := cur.Attachments[exemplar.KeyTraceID] - if haveTraceID || len(cur.Attachments) >= len(old.Attachments) { - return cur + return &metricdata.Exemplar{ + Value: v, + Timestamp: t, + Attachments: attachments, } - return old } func (a *DistributionData) clone() AggregationData { c := *a c.CountPerBucket = append([]int64(nil), a.CountPerBucket...) - c.ExemplarsPerBucket = append([]*exemplar.Exemplar(nil), a.ExemplarsPerBucket...) + c.ExemplarsPerBucket = append([]*metricdata.Exemplar(nil), a.ExemplarsPerBucket...) return &c } @@ -209,6 +229,33 @@ func (a *DistributionData) equal(other AggregationData) bool { return a.Count == a2.Count && a.Min == a2.Min && a.Max == a2.Max && math.Pow(a.Mean-a2.Mean, 2) < epsilon && math.Pow(a.variance()-a2.variance(), 2) < epsilon } +func (a *DistributionData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { + switch metricType { + case metricdata.TypeCumulativeDistribution: + buckets := []metricdata.Bucket{} + for i := 0; i < len(a.CountPerBucket); i++ { + buckets = append(buckets, metricdata.Bucket{ + Count: a.CountPerBucket[i], + Exemplar: a.ExemplarsPerBucket[i], + }) + } + bucketOptions := &metricdata.BucketOptions{Bounds: a.bounds} + + val := &metricdata.Distribution{ + Count: a.Count, + Sum: a.Sum(), + SumOfSquaredDeviation: a.SumOfSquaredDev, + BucketOptions: bucketOptions, + Buckets: buckets, + } + return metricdata.NewDistributionPoint(t, val) + + default: + // TODO: [rghetia] when we have a use case for TypeGaugeDistribution. + panic("unsupported metricdata.Type") + } +} + // LastValueData returns the last value recorded for LastValue aggregation. 
type LastValueData struct { Value float64 @@ -218,8 +265,8 @@ func (l *LastValueData) isAggregationData() bool { return true } -func (l *LastValueData) addSample(e *exemplar.Exemplar) { - l.Value = e.Value +func (l *LastValueData) addSample(v float64, _ map[string]interface{}, _ time.Time) { + l.Value = v } func (l *LastValueData) clone() AggregationData { @@ -233,3 +280,14 @@ func (l *LastValueData) equal(other AggregationData) bool { } return l.Value == a2.Value } + +func (l *LastValueData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { + switch metricType { + case metricdata.TypeGaugeInt64: + return metricdata.NewInt64Point(t, int64(l.Value)) + case metricdata.TypeGaugeFloat64: + return metricdata.NewFloat64Point(t, l.Value) + default: + panic("unsupported metricdata.Type") + } +} diff --git a/vendor/go.opencensus.io/stats/view/collector.go b/vendor/go.opencensus.io/stats/view/collector.go index 32415d485..8a6a2c0fd 100644 --- a/vendor/go.opencensus.io/stats/view/collector.go +++ b/vendor/go.opencensus.io/stats/view/collector.go @@ -17,8 +17,7 @@ package view import ( "sort" - - "go.opencensus.io/exemplar" + "time" "go.opencensus.io/internal/tagencoding" "go.opencensus.io/tag" @@ -33,13 +32,13 @@ type collector struct { a *Aggregation } -func (c *collector) addSample(s string, e *exemplar.Exemplar) { +func (c *collector) addSample(s string, v float64, attachments map[string]interface{}, t time.Time) { aggregator, ok := c.signatures[s] if !ok { aggregator = c.a.newData() c.signatures[s] = aggregator } - aggregator.addSample(e) + aggregator.addSample(v, attachments, t) } // collectRows returns a snapshot of the collected Row values. 
diff --git a/vendor/go.opencensus.io/stats/view/view.go b/vendor/go.opencensus.io/stats/view/view.go index 7372f999f..37f88e1d9 100644 --- a/vendor/go.opencensus.io/stats/view/view.go +++ b/vendor/go.opencensus.io/stats/view/view.go @@ -24,8 +24,7 @@ import ( "sync/atomic" "time" - "go.opencensus.io/exemplar" - + "go.opencensus.io/metric/metricdata" "go.opencensus.io/stats" "go.opencensus.io/tag" ) @@ -118,15 +117,17 @@ func dropZeroBounds(bounds ...float64) []float64 { // viewInternal is the internal representation of a View. type viewInternal struct { - view *View // view is the canonicalized View definition associated with this view. - subscribed uint32 // 1 if someone is subscribed and data need to be exported, use atomic to access - collector *collector + view *View // view is the canonicalized View definition associated with this view. + subscribed uint32 // 1 if someone is subscribed and data need to be exported, use atomic to access + collector *collector + metricDescriptor *metricdata.Descriptor } func newViewInternal(v *View) (*viewInternal, error) { return &viewInternal{ - view: v, - collector: &collector{make(map[string]AggregationData), v.Aggregation}, + view: v, + collector: &collector{make(map[string]AggregationData), v.Aggregation}, + metricDescriptor: viewToMetricDescriptor(v), }, nil } @@ -152,12 +153,12 @@ func (v *viewInternal) collectedRows() []*Row { return v.collector.collectedRows(v.view.TagKeys) } -func (v *viewInternal) addSample(m *tag.Map, e *exemplar.Exemplar) { +func (v *viewInternal) addSample(m *tag.Map, val float64, attachments map[string]interface{}, t time.Time) { if !v.isSubscribed() { return } sig := string(encodeWithKeys(m, v.view.TagKeys)) - v.collector.addSample(sig, e) + v.collector.addSample(sig, val, attachments, t) } // A Data is a set of rows about usage of the single measure associated diff --git a/vendor/go.opencensus.io/stats/view/view_to_metric.go b/vendor/go.opencensus.io/stats/view/view_to_metric.go new file mode 
100644 index 000000000..010f81bab --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/view_to_metric.go @@ -0,0 +1,140 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +import ( + "time" + + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/stats" +) + +func getUnit(unit string) metricdata.Unit { + switch unit { + case "1": + return metricdata.UnitDimensionless + case "ms": + return metricdata.UnitMilliseconds + case "By": + return metricdata.UnitBytes + } + return metricdata.UnitDimensionless +} + +func getType(v *View) metricdata.Type { + m := v.Measure + agg := v.Aggregation + + switch agg.Type { + case AggTypeSum: + switch m.(type) { + case *stats.Int64Measure: + return metricdata.TypeCumulativeInt64 + case *stats.Float64Measure: + return metricdata.TypeCumulativeFloat64 + default: + panic("unexpected measure type") + } + case AggTypeDistribution: + return metricdata.TypeCumulativeDistribution + case AggTypeLastValue: + switch m.(type) { + case *stats.Int64Measure: + return metricdata.TypeGaugeInt64 + case *stats.Float64Measure: + return metricdata.TypeGaugeFloat64 + default: + panic("unexpected measure type") + } + case AggTypeCount: + switch m.(type) { + case *stats.Int64Measure: + return metricdata.TypeCumulativeInt64 + case *stats.Float64Measure: + return metricdata.TypeCumulativeInt64 + default: + panic("unexpected measure type") + } + default: + panic("unexpected aggregation 
type") + } +} + +func getLableKeys(v *View) []metricdata.LabelKey { + labelKeys := []metricdata.LabelKey{} + for _, k := range v.TagKeys { + labelKeys = append(labelKeys, metricdata.LabelKey{Key: k.Name()}) + } + return labelKeys +} + +func viewToMetricDescriptor(v *View) *metricdata.Descriptor { + return &metricdata.Descriptor{ + Name: v.Name, + Description: v.Description, + Unit: getUnit(v.Measure.Unit()), + Type: getType(v), + LabelKeys: getLableKeys(v), + } +} + +func toLabelValues(row *Row, expectedKeys []metricdata.LabelKey) []metricdata.LabelValue { + labelValues := []metricdata.LabelValue{} + tagMap := make(map[string]string) + for _, tag := range row.Tags { + tagMap[tag.Key.Name()] = tag.Value + } + + for _, key := range expectedKeys { + if val, ok := tagMap[key.Key]; ok { + labelValues = append(labelValues, metricdata.NewLabelValue(val)) + } else { + labelValues = append(labelValues, metricdata.LabelValue{}) + } + } + return labelValues +} + +func rowToTimeseries(v *viewInternal, row *Row, now time.Time, startTime time.Time) *metricdata.TimeSeries { + return &metricdata.TimeSeries{ + Points: []metricdata.Point{row.Data.toPoint(v.metricDescriptor.Type, now)}, + LabelValues: toLabelValues(row, v.metricDescriptor.LabelKeys), + StartTime: startTime, + } +} + +func viewToMetric(v *viewInternal, now time.Time, startTime time.Time) *metricdata.Metric { + if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 || + v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 { + startTime = time.Time{} + } + + rows := v.collectedRows() + if len(rows) == 0 { + return nil + } + + ts := []*metricdata.TimeSeries{} + for _, row := range rows { + ts = append(ts, rowToTimeseries(v, row, now, startTime)) + } + + m := &metricdata.Metric{ + Descriptor: *v.metricDescriptor, + TimeSeries: ts, + } + return m +} diff --git a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go index 0069e4bc1..2f3c018af 100644 --- 
a/vendor/go.opencensus.io/stats/view/worker.go +++ b/vendor/go.opencensus.io/stats/view/worker.go @@ -17,8 +17,11 @@ package view import ( "fmt" + "sync" "time" + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/metric/metricproducer" "go.opencensus.io/stats" "go.opencensus.io/stats/internal" "go.opencensus.io/tag" @@ -43,6 +46,7 @@ type worker struct { timer *time.Ticker c chan command quit, done chan bool + mu sync.RWMutex } var defaultWorker *worker @@ -102,7 +106,7 @@ func RetrieveData(viewName string) ([]*Row, error) { return resp.rows, resp.err } -func record(tags *tag.Map, ms interface{}, attachments map[string]string) { +func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { req := &recordReq{ tm: tags, ms: ms.([]stats.Measurement), @@ -143,6 +147,9 @@ func newWorker() *worker { } func (w *worker) start() { + prodMgr := metricproducer.GlobalManager() + prodMgr.AddProducer(w) + for { select { case cmd := <-w.c: @@ -159,6 +166,9 @@ func (w *worker) start() { } func (w *worker) stop() { + prodMgr := metricproducer.GlobalManager() + prodMgr.DeleteProducer(w) + w.quit <- true <-w.done } @@ -176,6 +186,8 @@ func (w *worker) getMeasureRef(name string) *measureRef { } func (w *worker) tryRegisterView(v *View) (*viewInternal, error) { + w.mu.Lock() + defer w.mu.Unlock() vi, err := newViewInternal(v) if err != nil { return nil, err @@ -195,6 +207,12 @@ func (w *worker) tryRegisterView(v *View) (*viewInternal, error) { return vi, nil } +func (w *worker) unregisterView(viewName string) { + w.mu.Lock() + defer w.mu.Unlock() + delete(w.views, viewName) +} + func (w *worker) reportView(v *viewInternal, now time.Time) { if !v.isSubscribed() { return @@ -218,7 +236,46 @@ func (w *worker) reportView(v *viewInternal, now time.Time) { } func (w *worker) reportUsage(now time.Time) { + w.mu.Lock() + defer w.mu.Unlock() for _, v := range w.views { w.reportView(v, now) } } + +func (w *worker) toMetric(v *viewInternal, now time.Time) 
*metricdata.Metric { + if !v.isSubscribed() { + return nil + } + + _, ok := w.startTimes[v] + if !ok { + w.startTimes[v] = now + } + + var startTime time.Time + if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 || + v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 { + startTime = time.Time{} + } else { + startTime = w.startTimes[v] + } + + return viewToMetric(v, now, startTime) +} + +// Read reads all view data and returns them as metrics. +// It is typically invoked by metric reader to export stats in metric format. +func (w *worker) Read() []*metricdata.Metric { + w.mu.Lock() + defer w.mu.Unlock() + now := time.Now() + metrics := make([]*metricdata.Metric, 0, len(w.views)) + for _, v := range w.views { + metric := w.toMetric(v, now) + if metric != nil { + metrics = append(metrics, metric) + } + } + return metrics +} diff --git a/vendor/go.opencensus.io/stats/view/worker_commands.go b/vendor/go.opencensus.io/stats/view/worker_commands.go index f71ec1eb0..0267e179a 100644 --- a/vendor/go.opencensus.io/stats/view/worker_commands.go +++ b/vendor/go.opencensus.io/stats/view/worker_commands.go @@ -21,8 +21,6 @@ import ( "strings" "time" - "go.opencensus.io/exemplar" - "go.opencensus.io/stats" "go.opencensus.io/stats/internal" "go.opencensus.io/tag" @@ -105,7 +103,7 @@ func (cmd *unregisterFromViewReq) handleCommand(w *worker) { // The collected data can be cleared. 
vi.clearRows() } - delete(w.views, name) + w.unregisterView(name) } cmd.done <- struct{}{} } @@ -123,6 +121,8 @@ type retrieveDataResp struct { } func (cmd *retrieveDataReq) handleCommand(w *worker) { + w.mu.Lock() + defer w.mu.Unlock() vi, ok := w.views[cmd.v] if !ok { cmd.c <- &retrieveDataResp{ @@ -150,23 +150,20 @@ func (cmd *retrieveDataReq) handleCommand(w *worker) { type recordReq struct { tm *tag.Map ms []stats.Measurement - attachments map[string]string + attachments map[string]interface{} t time.Time } func (cmd *recordReq) handleCommand(w *worker) { + w.mu.Lock() + defer w.mu.Unlock() for _, m := range cmd.ms { if (m == stats.Measurement{}) { // not registered continue } ref := w.getMeasureRef(m.Measure().Name()) for v := range ref.views { - e := &exemplar.Exemplar{ - Value: m.Value(), - Timestamp: cmd.t, - Attachments: cmd.attachments, - } - v.addSample(cmd.tm, e) + v.addSample(cmd.tm, m.Value(), cmd.attachments, time.Now()) } } } diff --git a/vendor/go.opencensus.io/tag/context.go b/vendor/go.opencensus.io/tag/context.go index dcc13f498..b27d1b26b 100644 --- a/vendor/go.opencensus.io/tag/context.go +++ b/vendor/go.opencensus.io/tag/context.go @@ -17,8 +17,6 @@ package tag import ( "context" - - "go.opencensus.io/exemplar" ) // FromContext returns the tag map stored in the context. 
@@ -43,25 +41,3 @@ func NewContext(ctx context.Context, m *Map) context.Context { type ctxKey struct{} var mapCtxKey = ctxKey{} - -func init() { - exemplar.RegisterAttachmentExtractor(extractTagsAttachments) -} - -func extractTagsAttachments(ctx context.Context, a exemplar.Attachments) exemplar.Attachments { - m := FromContext(ctx) - if m == nil { - return a - } - if len(m.m) == 0 { - return a - } - if a == nil { - a = make(map[string]string) - } - - for k, v := range m.m { - a[exemplar.KeyPrefixTag+k.Name()] = v - } - return a -} diff --git a/vendor/go.opencensus.io/tag/map.go b/vendor/go.opencensus.io/tag/map.go index 5b72ba6ad..0272ef85a 100644 --- a/vendor/go.opencensus.io/tag/map.go +++ b/vendor/go.opencensus.io/tag/map.go @@ -28,10 +28,15 @@ type Tag struct { Value string } +type tagContent struct { + value string + m metadatas +} + // Map is a map of tags. Use New to create a context containing // a new Map. type Map struct { - m map[Key]string + m map[Key]tagContent } // Value returns the value for the key if a value for the key exists. 
@@ -40,7 +45,7 @@ func (m *Map) Value(k Key) (string, bool) { return "", false } v, ok := m.m[k] - return v, ok + return v.value, ok } func (m *Map) String() string { @@ -62,21 +67,21 @@ func (m *Map) String() string { return buffer.String() } -func (m *Map) insert(k Key, v string) { +func (m *Map) insert(k Key, v string, md metadatas) { if _, ok := m.m[k]; ok { return } - m.m[k] = v + m.m[k] = tagContent{value: v, m: md} } -func (m *Map) update(k Key, v string) { +func (m *Map) update(k Key, v string, md metadatas) { if _, ok := m.m[k]; ok { - m.m[k] = v + m.m[k] = tagContent{value: v, m: md} } } -func (m *Map) upsert(k Key, v string) { - m.m[k] = v +func (m *Map) upsert(k Key, v string, md metadatas) { + m.m[k] = tagContent{value: v, m: md} } func (m *Map) delete(k Key) { @@ -84,7 +89,7 @@ func (m *Map) delete(k Key) { } func newMap() *Map { - return &Map{m: make(map[Key]string)} + return &Map{m: make(map[Key]tagContent)} } // Mutator modifies a tag map. @@ -95,13 +100,17 @@ type Mutator interface { // Insert returns a mutator that inserts a // value associated with k. If k already exists in the tag map, // mutator doesn't update the value. -func Insert(k Key, v string) Mutator { +// Metadata applies metadata to the tag. It is optional. +// Metadatas are applied in the order in which it is provided. +// If more than one metadata updates the same attribute then +// the update from the last metadata prevails. +func Insert(k Key, v string, mds ...Metadata) Mutator { return &mutator{ fn: func(m *Map) (*Map, error) { if !checkValue(v) { return nil, errInvalidValue } - m.insert(k, v) + m.insert(k, v, createMetadatas(mds...)) return m, nil }, } @@ -110,13 +119,17 @@ func Insert(k Key, v string) Mutator { // Update returns a mutator that updates the // value of the tag associated with k with v. If k doesn't // exists in the tag map, the mutator doesn't insert the value. -func Update(k Key, v string) Mutator { +// Metadata applies metadata to the tag. It is optional. 
+// Metadatas are applied in the order in which it is provided. +// If more than one metadata updates the same attribute then +// the update from the last metadata prevails. +func Update(k Key, v string, mds ...Metadata) Mutator { return &mutator{ fn: func(m *Map) (*Map, error) { if !checkValue(v) { return nil, errInvalidValue } - m.update(k, v) + m.update(k, v, createMetadatas(mds...)) return m, nil }, } @@ -126,18 +139,37 @@ func Update(k Key, v string) Mutator { // value of the tag associated with k with v. It inserts the // value if k doesn't exist already. It mutates the value // if k already exists. -func Upsert(k Key, v string) Mutator { +// Metadata applies metadata to the tag. It is optional. +// Metadatas are applied in the order in which it is provided. +// If more than one metadata updates the same attribute then +// the update from the last metadata prevails. +func Upsert(k Key, v string, mds ...Metadata) Mutator { return &mutator{ fn: func(m *Map) (*Map, error) { if !checkValue(v) { return nil, errInvalidValue } - m.upsert(k, v) + m.upsert(k, v, createMetadatas(mds...)) return m, nil }, } } +func createMetadatas(mds ...Metadata) metadatas { + var metas metadatas + if len(mds) > 0 { + for _, md := range mds { + if md != nil { + md(&metas) + } + } + } else { + WithTTL(TTLUnlimitedPropagation)(&metas) + } + return metas + +} + // Delete returns a mutator that deletes // the value associated with k. 
func Delete(k Key) Mutator { @@ -160,10 +192,10 @@ func New(ctx context.Context, mutator ...Mutator) (context.Context, error) { if !checkKeyName(k.Name()) { return ctx, fmt.Errorf("key:%q: %v", k, errInvalidKeyName) } - if !checkValue(v) { + if !checkValue(v.value) { return ctx, fmt.Errorf("key:%q value:%q: %v", k.Name(), v, errInvalidValue) } - m.insert(k, v) + m.insert(k, v.value, v.m) } } var err error diff --git a/vendor/go.opencensus.io/tag/map_codec.go b/vendor/go.opencensus.io/tag/map_codec.go index e88e72777..f8b582761 100644 --- a/vendor/go.opencensus.io/tag/map_codec.go +++ b/vendor/go.opencensus.io/tag/map_codec.go @@ -170,9 +170,11 @@ func Encode(m *Map) []byte { } eg.writeByte(byte(tagsVersionID)) for k, v := range m.m { - eg.writeByte(byte(keyTypeString)) - eg.writeStringWithVarintLen(k.name) - eg.writeBytesWithVarintLen([]byte(v)) + if v.m.ttl.ttl == valueTTLUnlimitedPropagation { + eg.writeByte(byte(keyTypeString)) + eg.writeStringWithVarintLen(k.name) + eg.writeBytesWithVarintLen([]byte(v.value)) + } } return eg.bytes() } @@ -190,7 +192,7 @@ func Decode(bytes []byte) (*Map, error) { // DecodeEach decodes the given serialized tag map, calling handler for each // tag key and value decoded. 
-func DecodeEach(bytes []byte, fn func(key Key, val string)) error { +func DecodeEach(bytes []byte, fn func(key Key, val string, md metadatas)) error { eg := &encoderGRPC{ buf: bytes, } @@ -228,7 +230,7 @@ func DecodeEach(bytes []byte, fn func(key Key, val string)) error { if !checkValue(val) { return errInvalidValue } - fn(key, val) + fn(key, val, createMetadatas(WithTTL(TTLUnlimitedPropagation))) if err != nil { return err } diff --git a/vendor/go.opencensus.io/tag/metadata.go b/vendor/go.opencensus.io/tag/metadata.go new file mode 100644 index 000000000..6571a583e --- /dev/null +++ b/vendor/go.opencensus.io/tag/metadata.go @@ -0,0 +1,52 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tag + +const ( + // valueTTLNoPropagation prevents tag from propagating. + valueTTLNoPropagation = 0 + + // valueTTLUnlimitedPropagation allows tag to propagate without any limits on number of hops. + valueTTLUnlimitedPropagation = -1 +) + +// TTL is metadata that specifies number of hops a tag can propagate. +// Details about TTL metadata is specified at https://github.com/census-instrumentation/opencensus-specs/blob/master/tags/TagMap.md#tagmetadata +type TTL struct { + ttl int +} + +var ( + // TTLUnlimitedPropagation is TTL metadata that allows tag to propagate without any limits on number of hops. 
+ TTLUnlimitedPropagation = TTL{ttl: valueTTLUnlimitedPropagation} + + // TTLNoPropagation is TTL metadata that prevents tag from propagating. + TTLNoPropagation = TTL{ttl: valueTTLNoPropagation} +) + +type metadatas struct { + ttl TTL +} + +// Metadata applies metadatas specified by the function. +type Metadata func(*metadatas) + +// WithTTL applies metadata with provided ttl. +func WithTTL(ttl TTL) Metadata { + return func(m *metadatas) { + m.ttl = ttl + } +} diff --git a/vendor/go.opencensus.io/tag/profile_19.go b/vendor/go.opencensus.io/tag/profile_19.go index f81cd0b4a..b34d95e34 100644 --- a/vendor/go.opencensus.io/tag/profile_19.go +++ b/vendor/go.opencensus.io/tag/profile_19.go @@ -25,7 +25,7 @@ func do(ctx context.Context, f func(ctx context.Context)) { m := FromContext(ctx) keyvals := make([]string, 0, 2*len(m.m)) for k, v := range m.m { - keyvals = append(keyvals, k.Name(), v) + keyvals = append(keyvals, k.Name(), v.value) } pprof.Do(ctx, pprof.Labels(keyvals...), f) } diff --git a/vendor/go.uber.org/zap/internal/ztest/doc.go b/vendor/go.uber.org/zap/internal/ztest/doc.go new file mode 100644 index 000000000..cd4b98cbc --- /dev/null +++ b/vendor/go.uber.org/zap/internal/ztest/doc.go @@ -0,0 +1,24 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package ztest provides low-level helpers for testing log output. These +// utilities are helpful in zap's own unit tests, but any assertions using +// them are strongly coupled to a single encoding. +package ztest // import "go.uber.org/zap/internal/ztest" diff --git a/vendor/go.uber.org/zap/internal/ztest/timeout.go b/vendor/go.uber.org/zap/internal/ztest/timeout.go new file mode 100644 index 000000000..f7d58f316 --- /dev/null +++ b/vendor/go.uber.org/zap/internal/ztest/timeout.go @@ -0,0 +1,59 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package ztest + +import ( + "log" + "os" + "strconv" + "time" +) + +var _timeoutScale = 1.0 + +// Timeout scales the provided duration by $TEST_TIMEOUT_SCALE. +func Timeout(base time.Duration) time.Duration { + return time.Duration(float64(base) * _timeoutScale) +} + +// Sleep scales the sleep duration by $TEST_TIMEOUT_SCALE. +func Sleep(base time.Duration) { + time.Sleep(Timeout(base)) +} + +// Initialize checks the environment and alters the timeout scale accordingly. +// It returns a function to undo the scaling. +func Initialize(factor string) func() { + original := _timeoutScale + fv, err := strconv.ParseFloat(factor, 64) + if err != nil { + panic(err) + } + _timeoutScale = fv + return func() { _timeoutScale = original } +} + +func init() { + if v := os.Getenv("TEST_TIMEOUT_SCALE"); v != "" { + Initialize(v) + log.Printf("Scaling timeouts by %vx.\n", _timeoutScale) + } +} diff --git a/vendor/go.uber.org/zap/internal/ztest/writer.go b/vendor/go.uber.org/zap/internal/ztest/writer.go new file mode 100644 index 000000000..9fdd5805e --- /dev/null +++ b/vendor/go.uber.org/zap/internal/ztest/writer.go @@ -0,0 +1,96 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package ztest + +import ( + "bytes" + "errors" + "io/ioutil" + "strings" +) + +// A Syncer is a spy for the Sync portion of zapcore.WriteSyncer. +type Syncer struct { + err error + called bool +} + +// SetError sets the error that the Sync method will return. +func (s *Syncer) SetError(err error) { + s.err = err +} + +// Sync records that it was called, then returns the user-supplied error (if +// any). +func (s *Syncer) Sync() error { + s.called = true + return s.err +} + +// Called reports whether the Sync method was called. +func (s *Syncer) Called() bool { + return s.called +} + +// A Discarder sends all writes to ioutil.Discard. +type Discarder struct{ Syncer } + +// Write implements io.Writer. +func (d *Discarder) Write(b []byte) (int, error) { + return ioutil.Discard.Write(b) +} + +// FailWriter is a WriteSyncer that always returns an error on writes. 
+type FailWriter struct{ Syncer } + +// Write implements io.Writer. +func (w FailWriter) Write(b []byte) (int, error) { + return len(b), errors.New("failed") +} + +// ShortWriter is a WriteSyncer whose write method never fails, but +// nevertheless fails to the last byte of the input. +type ShortWriter struct{ Syncer } + +// Write implements io.Writer. +func (w ShortWriter) Write(b []byte) (int, error) { + return len(b) - 1, nil +} + +// Buffer is an implementation of zapcore.WriteSyncer that sends all writes to +// a bytes.Buffer. It has convenience methods to split the accumulated buffer +// on newlines. +type Buffer struct { + bytes.Buffer + Syncer +} + +// Lines returns the current buffer contents, split on newlines. +func (b *Buffer) Lines() []string { + output := strings.Split(b.String(), "\n") + return output[:len(output)-1] +} + +// Stripped returns the current buffer contents with the last trailing newline +// stripped. +func (b *Buffer) Stripped() string { + return strings.TrimRight(b.String(), "\n") +} diff --git a/vendor/go.uber.org/zap/zaptest/doc.go b/vendor/go.uber.org/zap/zaptest/doc.go new file mode 100644 index 000000000..b377859c4 --- /dev/null +++ b/vendor/go.uber.org/zap/zaptest/doc.go @@ -0,0 +1,22 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package zaptest provides a variety of helpers for testing log output. +package zaptest // import "go.uber.org/zap/zaptest" diff --git a/vendor/go.uber.org/zap/zaptest/logger.go b/vendor/go.uber.org/zap/zaptest/logger.go new file mode 100644 index 000000000..1e2451c26 --- /dev/null +++ b/vendor/go.uber.org/zap/zaptest/logger.go @@ -0,0 +1,140 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package zaptest + +import ( + "bytes" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +// LoggerOption configures the test logger built by NewLogger. +type LoggerOption interface { + applyLoggerOption(*loggerOptions) +} + +type loggerOptions struct { + Level zapcore.LevelEnabler + zapOptions []zap.Option +} + +type loggerOptionFunc func(*loggerOptions) + +func (f loggerOptionFunc) applyLoggerOption(opts *loggerOptions) { + f(opts) +} + +// Level controls which messages are logged by a test Logger built by +// NewLogger. +func Level(enab zapcore.LevelEnabler) LoggerOption { + return loggerOptionFunc(func(opts *loggerOptions) { + opts.Level = enab + }) +} + +// WrapOptions adds zap.Option's to a test Logger built by NewLogger. +func WrapOptions(zapOpts ...zap.Option) LoggerOption { + return loggerOptionFunc(func(opts *loggerOptions) { + opts.zapOptions = zapOpts + }) +} + +// NewLogger builds a new Logger that logs all messages to the given +// testing.TB. +// +// logger := zaptest.NewLogger(t) +// +// Use this with a *testing.T or *testing.B to get logs which get printed only +// if a test fails or if you ran go test -v. +// +// The returned logger defaults to logging debug level messages and above. +// This may be changed by passing a zaptest.Level during construction. +// +// logger := zaptest.NewLogger(t, zaptest.Level(zap.WarnLevel)) +// +// You may also pass zap.Option's to customize test logger. +// +// logger := zaptest.NewLogger(t, zaptest.WrapOptions(zap.AddCaller())) +func NewLogger(t TestingT, opts ...LoggerOption) *zap.Logger { + cfg := loggerOptions{ + Level: zapcore.DebugLevel, + } + for _, o := range opts { + o.applyLoggerOption(&cfg) + } + + writer := newTestingWriter(t) + zapOptions := []zap.Option{ + // Send zap errors to the same writer and mark the test as failed if + // that happens. + zap.ErrorOutput(writer.WithMarkFailed(true)), + } + zapOptions = append(zapOptions, cfg.zapOptions...) 
+ + return zap.New( + zapcore.NewCore( + zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()), + writer, + cfg.Level, + ), + zapOptions..., + ) +} + +// testingWriter is a WriteSyncer that writes to the given testing.TB. +type testingWriter struct { + t TestingT + + // If true, the test will be marked as failed if this testingWriter is + // ever used. + markFailed bool +} + +func newTestingWriter(t TestingT) testingWriter { + return testingWriter{t: t} +} + +// WithMarkFailed returns a copy of this testingWriter with markFailed set to +// the provided value. +func (w testingWriter) WithMarkFailed(v bool) testingWriter { + w.markFailed = v + return w +} + +func (w testingWriter) Write(p []byte) (n int, err error) { + n = len(p) + + // Strip trailing newline because t.Log always adds one. + p = bytes.TrimRight(p, "\n") + + // Note: t.Log is safe for concurrent use. + w.t.Logf("%s", p) + if w.markFailed { + w.t.Fail() + } + + return n, nil +} + +func (w testingWriter) Sync() error { + return nil +} diff --git a/vendor/go.uber.org/zap/zaptest/testingt.go b/vendor/go.uber.org/zap/zaptest/testingt.go new file mode 100644 index 000000000..792463be3 --- /dev/null +++ b/vendor/go.uber.org/zap/zaptest/testingt.go @@ -0,0 +1,47 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zaptest + +// TestingT is a subset of the API provided by all *testing.T and *testing.B +// objects. +type TestingT interface { + // Logs the given message without failing the test. + Logf(string, ...interface{}) + + // Logs the given message and marks the test as failed. + Errorf(string, ...interface{}) + + // Marks the test as failed. + Fail() + + // Returns true if the test has been marked as failed. + Failed() bool + + // Returns the name of the test. + Name() string + + // Marks the test as failed and stops execution of that test. + FailNow() +} + +// Note: We currently only rely on Logf. We are including Errorf and FailNow +// in the interface in anticipation of future need since we can't extend the +// interface without a breaking change. diff --git a/vendor/go.uber.org/zap/zaptest/timeout.go b/vendor/go.uber.org/zap/zaptest/timeout.go new file mode 100644 index 000000000..f0be44416 --- /dev/null +++ b/vendor/go.uber.org/zap/zaptest/timeout.go @@ -0,0 +1,45 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zaptest + +import ( + "time" + + "go.uber.org/zap/internal/ztest" +) + +// Timeout scales the provided duration by $TEST_TIMEOUT_SCALE. +// +// Deprecated: This function is intended for internal testing and shouldn't be +// used outside zap itself. It was introduced before Go supported internal +// packages. +func Timeout(base time.Duration) time.Duration { + return ztest.Timeout(base) +} + +// Sleep scales the sleep duration by $TEST_TIMEOUT_SCALE. +// +// Deprecated: This function is intended for internal testing and shouldn't be +// used outside zap itself. It was introduced before Go supported internal +// packages. 
+func Sleep(base time.Duration) { + ztest.Sleep(base) +} diff --git a/vendor/go.uber.org/zap/zaptest/writer.go b/vendor/go.uber.org/zap/zaptest/writer.go new file mode 100644 index 000000000..0701630e1 --- /dev/null +++ b/vendor/go.uber.org/zap/zaptest/writer.go @@ -0,0 +1,44 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zaptest + +import "go.uber.org/zap/internal/ztest" + +type ( + // A Syncer is a spy for the Sync portion of zapcore.WriteSyncer. + Syncer = ztest.Syncer + + // A Discarder sends all writes to ioutil.Discard. + Discarder = ztest.Discarder + + // FailWriter is a WriteSyncer that always returns an error on writes. + FailWriter = ztest.FailWriter + + // ShortWriter is a WriteSyncer whose write method never returns an error, + // but always reports that it wrote one byte less than the input slice's + // length (thus, a "short write"). 
+ ShortWriter = ztest.ShortWriter + + // Buffer is an implementation of zapcore.WriteSyncer that sends all writes to + // a bytes.Buffer. It has convenience methods to split the accumulated buffer + // on newlines. + Buffer = ztest.Buffer +) diff --git a/vendor/k8s.io/client-go/tools/record/OWNERS b/vendor/k8s.io/client-go/tools/record/OWNERS new file mode 100644 index 000000000..4dd54bbce --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/OWNERS @@ -0,0 +1,27 @@ +reviewers: +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- derekwaynecarr +- caesarxuchao +- vishh +- mikedanese +- liggitt +- nikhiljindal +- erictune +- pmorie +- dchen1107 +- saad-ali +- luxas +- yifan-gu +- eparis +- mwielgus +- timothysc +- jsafrane +- dims +- krousey +- a-robinson +- aveshagarwal +- resouer +- cjcullen diff --git a/vendor/k8s.io/client-go/tools/record/doc.go b/vendor/k8s.io/client-go/tools/record/doc.go new file mode 100644 index 000000000..657ddecbc --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package record has all client logic for recording and reporting events. 
+package record // import "k8s.io/client-go/tools/record" diff --git a/vendor/k8s.io/client-go/tools/record/event.go b/vendor/k8s.io/client-go/tools/record/event.go new file mode 100644 index 000000000..168dfa80c --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/event.go @@ -0,0 +1,322 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package record + +import ( + "fmt" + "math/rand" + "time" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/clock" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/watch" + restclient "k8s.io/client-go/rest" + ref "k8s.io/client-go/tools/reference" + + "net/http" + + "github.com/golang/glog" +) + +const maxTriesPerEvent = 12 + +var defaultSleepDuration = 10 * time.Second + +const maxQueuedEvents = 1000 + +// EventSink knows how to store events (client.Client implements it.) +// EventSink must respect the namespace that will be embedded in 'event'. +// It is assumed that EventSink will return the same sorts of errors as +// pkg/client's REST client. +type EventSink interface { + Create(event *v1.Event) (*v1.Event, error) + Update(event *v1.Event) (*v1.Event, error) + Patch(oldEvent *v1.Event, data []byte) (*v1.Event, error) +} + +// EventRecorder knows how to record events on behalf of an EventSource. 
+type EventRecorder interface { + // Event constructs an event from the given information and puts it in the queue for sending. + // 'object' is the object this event is about. Event will make a reference-- or you may also + // pass a reference to the object directly. + // 'type' of this event, and can be one of Normal, Warning. New types could be added in future + // 'reason' is the reason this event is generated. 'reason' should be short and unique; it + // should be in UpperCamelCase format (starting with a capital letter). "reason" will be used + // to automate handling of events, so imagine people writing switch statements to handle them. + // You want to make that easy. + // 'message' is intended to be human readable. + // + // The resulting event will be created in the same namespace as the reference object. + Event(object runtime.Object, eventtype, reason, message string) + + // Eventf is just like Event, but with Sprintf for the message field. + Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) + + // PastEventf is just like Eventf, but with an option to specify the event's 'timestamp' field. + PastEventf(object runtime.Object, timestamp metav1.Time, eventtype, reason, messageFmt string, args ...interface{}) + + // AnnotatedEventf is just like eventf, but with annotations attached + AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) +} + +// EventBroadcaster knows how to receive events and send them to any EventSink, watcher, or log. +type EventBroadcaster interface { + // StartEventWatcher starts sending events received from this EventBroadcaster to the given + // event handler function. The return value can be ignored or used to stop recording, if + // desired. + StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface + + // StartRecordingToSink starts sending events received from this EventBroadcaster to the given + // sink. 
The return value can be ignored or used to stop recording, if desired. + StartRecordingToSink(sink EventSink) watch.Interface + + // StartLogging starts sending events received from this EventBroadcaster to the given logging + // function. The return value can be ignored or used to stop recording, if desired. + StartLogging(logf func(format string, args ...interface{})) watch.Interface + + // NewRecorder returns an EventRecorder that can be used to send events to this EventBroadcaster + // with the event source set to the given event source. + NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder +} + +// Creates a new event broadcaster. +func NewBroadcaster() EventBroadcaster { + return &eventBroadcasterImpl{watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), defaultSleepDuration} +} + +func NewBroadcasterForTests(sleepDuration time.Duration) EventBroadcaster { + return &eventBroadcasterImpl{watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), sleepDuration} +} + +type eventBroadcasterImpl struct { + *watch.Broadcaster + sleepDuration time.Duration +} + +// StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink. +// The return value can be ignored or used to stop recording, if desired. +// TODO: make me an object with parameterizable queue length and retry interval +func (eventBroadcaster *eventBroadcasterImpl) StartRecordingToSink(sink EventSink) watch.Interface { + // The default math/rand package functions aren't thread safe, so create a + // new Rand object for each StartRecording call. 
+ randGen := rand.New(rand.NewSource(time.Now().UnixNano())) + eventCorrelator := NewEventCorrelator(clock.RealClock{}) + return eventBroadcaster.StartEventWatcher( + func(event *v1.Event) { + recordToSink(sink, event, eventCorrelator, randGen, eventBroadcaster.sleepDuration) + }) +} + +func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrelator, randGen *rand.Rand, sleepDuration time.Duration) { + // Make a copy before modification, because there could be multiple listeners. + // Events are safe to copy like this. + eventCopy := *event + event = &eventCopy + result, err := eventCorrelator.EventCorrelate(event) + if err != nil { + utilruntime.HandleError(err) + } + if result.Skip { + return + } + tries := 0 + for { + if recordEvent(sink, result.Event, result.Patch, result.Event.Count > 1, eventCorrelator) { + break + } + tries++ + if tries >= maxTriesPerEvent { + glog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event) + break + } + // Randomize the first sleep so that various clients won't all be + // synced up if the master goes down. + if tries == 1 { + time.Sleep(time.Duration(float64(sleepDuration) * randGen.Float64())) + } else { + time.Sleep(sleepDuration) + } + } +} + +func isKeyNotFoundError(err error) bool { + statusErr, _ := err.(*errors.StatusError) + + if statusErr != nil && statusErr.Status().Code == http.StatusNotFound { + return true + } + + return false +} + +// recordEvent attempts to write event to a sink. It returns true if the event +// was successfully recorded or discarded, false if it should be retried. +// If updateExistingEvent is false, it creates a new event, otherwise it updates +// existing event. 
+func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEvent bool, eventCorrelator *EventCorrelator) bool { + var newEvent *v1.Event + var err error + if updateExistingEvent { + newEvent, err = sink.Patch(event, patch) + } + // Update can fail because the event may have been removed and it no longer exists. + if !updateExistingEvent || (updateExistingEvent && isKeyNotFoundError(err)) { + // Making sure that ResourceVersion is empty on creation + event.ResourceVersion = "" + newEvent, err = sink.Create(event) + } + if err == nil { + // we need to update our event correlator with the server returned state to handle name/resourceversion + eventCorrelator.UpdateState(newEvent) + return true + } + + // If we can't contact the server, then hold everything while we keep trying. + // Otherwise, something about the event is malformed and we should abandon it. + switch err.(type) { + case *restclient.RequestConstructionError: + // We will construct the request the same next time, so don't keep trying. + glog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err) + return true + case *errors.StatusError: + if errors.IsAlreadyExists(err) { + glog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err) + } else { + glog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err) + } + return true + case *errors.UnexpectedObjectError: + // We don't expect this; it implies the server's response didn't match a + // known pattern. Go ahead and retry. + default: + // This case includes actual http transport errors. Go ahead and retry. + } + glog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err) + return false +} + +// StartLogging starts sending events received from this EventBroadcaster to the given logging function. +// The return value can be ignored or used to stop recording, if desired. 
+func (eventBroadcaster *eventBroadcasterImpl) StartLogging(logf func(format string, args ...interface{})) watch.Interface { + return eventBroadcaster.StartEventWatcher( + func(e *v1.Event) { + logf("Event(%#v): type: '%v' reason: '%v' %v", e.InvolvedObject, e.Type, e.Reason, e.Message) + }) +} + +// StartEventWatcher starts sending events received from this EventBroadcaster to the given event handler function. +// The return value can be ignored or used to stop recording, if desired. +func (eventBroadcaster *eventBroadcasterImpl) StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface { + watcher := eventBroadcaster.Watch() + go func() { + defer utilruntime.HandleCrash() + for watchEvent := range watcher.ResultChan() { + event, ok := watchEvent.Object.(*v1.Event) + if !ok { + // This is all local, so there's no reason this should + // ever happen. + continue + } + eventHandler(event) + } + }() + return watcher +} + +// NewRecorder returns an EventRecorder that records events with the given event source. +func (eventBroadcaster *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder { + return &recorderImpl{scheme, source, eventBroadcaster.Broadcaster, clock.RealClock{}} +} + +type recorderImpl struct { + scheme *runtime.Scheme + source v1.EventSource + *watch.Broadcaster + clock clock.Clock +} + +func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations map[string]string, timestamp metav1.Time, eventtype, reason, message string) { + ref, err := ref.GetReference(recorder.scheme, object) + if err != nil { + glog.Errorf("Could not construct reference to: '%#v' due to: '%v'. 
Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message) + return + } + + if !validateEventType(eventtype) { + glog.Errorf("Unsupported event type: '%v'", eventtype) + return + } + + event := recorder.makeEvent(ref, annotations, eventtype, reason, message) + event.Source = recorder.source + + go func() { + // NOTE: events should be a non-blocking operation + defer utilruntime.HandleCrash() + recorder.Action(watch.Added, event) + }() +} + +func validateEventType(eventtype string) bool { + switch eventtype { + case v1.EventTypeNormal, v1.EventTypeWarning: + return true + } + return false +} + +func (recorder *recorderImpl) Event(object runtime.Object, eventtype, reason, message string) { + recorder.generateEvent(object, nil, metav1.Now(), eventtype, reason, message) +} + +func (recorder *recorderImpl) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { + recorder.Event(object, eventtype, reason, fmt.Sprintf(messageFmt, args...)) +} + +func (recorder *recorderImpl) PastEventf(object runtime.Object, timestamp metav1.Time, eventtype, reason, messageFmt string, args ...interface{}) { + recorder.generateEvent(object, nil, timestamp, eventtype, reason, fmt.Sprintf(messageFmt, args...)) +} + +func (recorder *recorderImpl) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { + recorder.generateEvent(object, annotations, metav1.Now(), eventtype, reason, fmt.Sprintf(messageFmt, args...)) +} + +func (recorder *recorderImpl) makeEvent(ref *v1.ObjectReference, annotations map[string]string, eventtype, reason, message string) *v1.Event { + t := metav1.Time{Time: recorder.clock.Now()} + namespace := ref.Namespace + if namespace == "" { + namespace = metav1.NamespaceDefault + } + return &v1.Event{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()), + Namespace: namespace, + Annotations: annotations, + }, + 
InvolvedObject: *ref, + Reason: reason, + Message: message, + FirstTimestamp: t, + LastTimestamp: t, + Count: 1, + Type: eventtype, + } +} diff --git a/vendor/k8s.io/client-go/tools/record/events_cache.go b/vendor/k8s.io/client-go/tools/record/events_cache.go new file mode 100644 index 000000000..a42084f3a --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/events_cache.go @@ -0,0 +1,462 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package record + +import ( + "encoding/json" + "fmt" + "strings" + "sync" + "time" + + "github.com/golang/groupcache/lru" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/client-go/util/flowcontrol" +) + +const ( + maxLruCacheEntries = 4096 + + // if we see the same event that varies only by message + // more than 10 times in a 10 minute period, aggregate the event + defaultAggregateMaxEvents = 10 + defaultAggregateIntervalInSeconds = 600 + + // by default, allow a source to send 25 events about an object + // but control the refill rate to 1 new event every 5 minutes + // this helps control the long-tail of events for things that are always + // unhealthy + defaultSpamBurst = 25 + defaultSpamQPS = 1. / 300. 
+) + +// getEventKey builds unique event key based on source, involvedObject, reason, message +func getEventKey(event *v1.Event) string { + return strings.Join([]string{ + event.Source.Component, + event.Source.Host, + event.InvolvedObject.Kind, + event.InvolvedObject.Namespace, + event.InvolvedObject.Name, + event.InvolvedObject.FieldPath, + string(event.InvolvedObject.UID), + event.InvolvedObject.APIVersion, + event.Type, + event.Reason, + event.Message, + }, + "") +} + +// getSpamKey builds unique event key based on source, involvedObject +func getSpamKey(event *v1.Event) string { + return strings.Join([]string{ + event.Source.Component, + event.Source.Host, + event.InvolvedObject.Kind, + event.InvolvedObject.Namespace, + event.InvolvedObject.Name, + string(event.InvolvedObject.UID), + event.InvolvedObject.APIVersion, + }, + "") +} + +// EventFilterFunc is a function that returns true if the event should be skipped +type EventFilterFunc func(event *v1.Event) bool + +// EventSourceObjectSpamFilter is responsible for throttling +// the amount of events a source and object can produce. +type EventSourceObjectSpamFilter struct { + sync.RWMutex + + // the cache that manages last synced state + cache *lru.Cache + + // burst is the amount of events we allow per source + object + burst int + + // qps is the refill rate of the token bucket in queries per second + qps float32 + + // clock is used to allow for testing over a time interval + clock clock.Clock +} + +// NewEventSourceObjectSpamFilter allows burst events from a source about an object with the specified qps refill. +func NewEventSourceObjectSpamFilter(lruCacheSize, burst int, qps float32, clock clock.Clock) *EventSourceObjectSpamFilter { + return &EventSourceObjectSpamFilter{ + cache: lru.New(lruCacheSize), + burst: burst, + qps: qps, + clock: clock, + } +} + +// spamRecord holds data used to perform spam filtering decisions. 
+type spamRecord struct { + // rateLimiter controls the rate of events about this object + rateLimiter flowcontrol.RateLimiter +} + +// Filter controls that a given source+object are not exceeding the allowed rate. +func (f *EventSourceObjectSpamFilter) Filter(event *v1.Event) bool { + var record spamRecord + + // controls our cached information about this event (source+object) + eventKey := getSpamKey(event) + + // do we have a record of similar events in our cache? + f.Lock() + defer f.Unlock() + value, found := f.cache.Get(eventKey) + if found { + record = value.(spamRecord) + } + + // verify we have a rate limiter for this record + if record.rateLimiter == nil { + record.rateLimiter = flowcontrol.NewTokenBucketRateLimiterWithClock(f.qps, f.burst, f.clock) + } + + // ensure we have available rate + filter := !record.rateLimiter.TryAccept() + + // update the cache + f.cache.Add(eventKey, record) + + return filter +} + +// EventAggregatorKeyFunc is responsible for grouping events for aggregation +// It returns a tuple of the following: +// aggregateKey - key the identifies the aggregate group to bucket this event +// localKey - key that makes this event in the local group +type EventAggregatorKeyFunc func(event *v1.Event) (aggregateKey string, localKey string) + +// EventAggregatorByReasonFunc aggregates events by exact match on event.Source, event.InvolvedObject, event.Type and event.Reason +func EventAggregatorByReasonFunc(event *v1.Event) (string, string) { + return strings.Join([]string{ + event.Source.Component, + event.Source.Host, + event.InvolvedObject.Kind, + event.InvolvedObject.Namespace, + event.InvolvedObject.Name, + string(event.InvolvedObject.UID), + event.InvolvedObject.APIVersion, + event.Type, + event.Reason, + }, + ""), event.Message +} + +// EventAggregatorMessageFunc is responsible for producing an aggregation message +type EventAggregatorMessageFunc func(event *v1.Event) string + +// EventAggregratorByReasonMessageFunc returns an aggregate 
message by prefixing the incoming message +func EventAggregatorByReasonMessageFunc(event *v1.Event) string { + return "(combined from similar events): " + event.Message +} + +// EventAggregator identifies similar events and aggregates them into a single event +type EventAggregator struct { + sync.RWMutex + + // The cache that manages aggregation state + cache *lru.Cache + + // The function that groups events for aggregation + keyFunc EventAggregatorKeyFunc + + // The function that generates a message for an aggregate event + messageFunc EventAggregatorMessageFunc + + // The maximum number of events in the specified interval before aggregation occurs + maxEvents uint + + // The amount of time in seconds that must transpire since the last occurrence of a similar event before it's considered new + maxIntervalInSeconds uint + + // clock is used to allow for testing over a time interval + clock clock.Clock +} + +// NewEventAggregator returns a new instance of an EventAggregator +func NewEventAggregator(lruCacheSize int, keyFunc EventAggregatorKeyFunc, messageFunc EventAggregatorMessageFunc, + maxEvents int, maxIntervalInSeconds int, clock clock.Clock) *EventAggregator { + return &EventAggregator{ + cache: lru.New(lruCacheSize), + keyFunc: keyFunc, + messageFunc: messageFunc, + maxEvents: uint(maxEvents), + maxIntervalInSeconds: uint(maxIntervalInSeconds), + clock: clock, + } +} + +// aggregateRecord holds data used to perform aggregation decisions +type aggregateRecord struct { + // we track the number of unique local keys we have seen in the aggregate set to know when to actually aggregate + // if the size of this set exceeds the max, we know we need to aggregate + localKeys sets.String + // The last time at which the aggregate was recorded + lastTimestamp metav1.Time +} + +// EventAggregate checks if a similar event has been seen according to the +// aggregation configuration (max events, max interval, etc) and returns: +// +// - The (potentially modified) event that 
should be created +// - The cache key for the event, for correlation purposes. This will be set to +// the full key for normal events, and to the result of +// EventAggregatorMessageFunc for aggregate events. +func (e *EventAggregator) EventAggregate(newEvent *v1.Event) (*v1.Event, string) { + now := metav1.NewTime(e.clock.Now()) + var record aggregateRecord + // eventKey is the full cache key for this event + eventKey := getEventKey(newEvent) + // aggregateKey is for the aggregate event, if one is needed. + aggregateKey, localKey := e.keyFunc(newEvent) + + // Do we have a record of similar events in our cache? + e.Lock() + defer e.Unlock() + value, found := e.cache.Get(aggregateKey) + if found { + record = value.(aggregateRecord) + } + + // Is the previous record too old? If so, make a fresh one. Note: if we didn't + // find a similar record, its lastTimestamp will be the zero value, so we + // create a new one in that case. + maxInterval := time.Duration(e.maxIntervalInSeconds) * time.Second + interval := now.Time.Sub(record.lastTimestamp.Time) + if interval > maxInterval { + record = aggregateRecord{localKeys: sets.NewString()} + } + + // Write the new event into the aggregation record and put it on the cache + record.localKeys.Insert(localKey) + record.lastTimestamp = now + e.cache.Add(aggregateKey, record) + + // If we are not yet over the threshold for unique events, don't correlate them + if uint(record.localKeys.Len()) < e.maxEvents { + return newEvent, eventKey + } + + // do not grow our local key set any larger than max + record.localKeys.PopAny() + + // create a new aggregate event, and return the aggregateKey as the cache key + // (so that it can be overwritten.) 
+ eventCopy := &v1.Event{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%v.%x", newEvent.InvolvedObject.Name, now.UnixNano()), + Namespace: newEvent.Namespace, + }, + Count: 1, + FirstTimestamp: now, + InvolvedObject: newEvent.InvolvedObject, + LastTimestamp: now, + Message: e.messageFunc(newEvent), + Type: newEvent.Type, + Reason: newEvent.Reason, + Source: newEvent.Source, + } + return eventCopy, aggregateKey +} + +// eventLog records data about when an event was observed +type eventLog struct { + // The number of times the event has occurred since first occurrence. + count uint + + // The time at which the event was first recorded. + firstTimestamp metav1.Time + + // The unique name of the first occurrence of this event + name string + + // Resource version returned from previous interaction with server + resourceVersion string +} + +// eventLogger logs occurrences of an event +type eventLogger struct { + sync.RWMutex + cache *lru.Cache + clock clock.Clock +} + +// newEventLogger observes events and counts their frequencies +func newEventLogger(lruCacheEntries int, clock clock.Clock) *eventLogger { + return &eventLogger{cache: lru.New(lruCacheEntries), clock: clock} +} + +// eventObserve records an event, or updates an existing one if key is a cache hit +func (e *eventLogger) eventObserve(newEvent *v1.Event, key string) (*v1.Event, []byte, error) { + var ( + patch []byte + err error + ) + eventCopy := *newEvent + event := &eventCopy + + e.Lock() + defer e.Unlock() + + // Check if there is an existing event we should update + lastObservation := e.lastEventObservationFromCache(key) + + // If we found a result, prepare a patch + if lastObservation.count > 0 { + // update the event based on the last observation so patch will work as desired + event.Name = lastObservation.name + event.ResourceVersion = lastObservation.resourceVersion + event.FirstTimestamp = lastObservation.firstTimestamp + event.Count = int32(lastObservation.count) + 1 + + eventCopy2 := 
*event + eventCopy2.Count = 0 + eventCopy2.LastTimestamp = metav1.NewTime(time.Unix(0, 0)) + eventCopy2.Message = "" + + newData, _ := json.Marshal(event) + oldData, _ := json.Marshal(eventCopy2) + patch, err = strategicpatch.CreateTwoWayMergePatch(oldData, newData, event) + } + + // record our new observation + e.cache.Add( + key, + eventLog{ + count: uint(event.Count), + firstTimestamp: event.FirstTimestamp, + name: event.Name, + resourceVersion: event.ResourceVersion, + }, + ) + return event, patch, err +} + +// updateState updates its internal tracking information based on latest server state +func (e *eventLogger) updateState(event *v1.Event) { + key := getEventKey(event) + e.Lock() + defer e.Unlock() + // record our new observation + e.cache.Add( + key, + eventLog{ + count: uint(event.Count), + firstTimestamp: event.FirstTimestamp, + name: event.Name, + resourceVersion: event.ResourceVersion, + }, + ) +} + +// lastEventObservationFromCache returns the event from the cache, reads must be protected via external lock +func (e *eventLogger) lastEventObservationFromCache(key string) eventLog { + value, ok := e.cache.Get(key) + if ok { + observationValue, ok := value.(eventLog) + if ok { + return observationValue + } + } + return eventLog{} +} + +// EventCorrelator processes all incoming events and performs analysis to avoid overwhelming the system. It can filter all +// incoming events to see if the event should be filtered from further processing. It can aggregate similar events that occur +// frequently to protect the system from spamming events that are difficult for users to distinguish. It performs de-duplication +// to ensure events that are observed multiple times are compacted into a single event with increasing counts. 
+type EventCorrelator struct { + // the function to filter the event + filterFunc EventFilterFunc + // the object that performs event aggregation + aggregator *EventAggregator + // the object that observes events as they come through + logger *eventLogger +} + +// EventCorrelateResult is the result of a Correlate +type EventCorrelateResult struct { + // the event after correlation + Event *v1.Event + // if provided, perform a strategic patch when updating the record on the server + Patch []byte + // if true, do no further processing of the event + Skip bool +} + +// NewEventCorrelator returns an EventCorrelator configured with default values. +// +// The EventCorrelator is responsible for event filtering, aggregating, and counting +// prior to interacting with the API server to record the event. +// +// The default behavior is as follows: +// * Aggregation is performed if a similar event is recorded 10 times in a +// in a 10 minute rolling interval. A similar event is an event that varies only by +// the Event.Message field. Rather than recording the precise event, aggregation +// will create a new event whose message reports that it has combined events with +// the same reason. +// * Events are incrementally counted if the exact same event is encountered multiple +// times. +// * A source may burst 25 events about an object, but has a refill rate budget +// per object of 1 event every 5 minutes to control long-tail of spam. 
+func NewEventCorrelator(clock clock.Clock) *EventCorrelator { + cacheSize := maxLruCacheEntries + spamFilter := NewEventSourceObjectSpamFilter(cacheSize, defaultSpamBurst, defaultSpamQPS, clock) + return &EventCorrelator{ + filterFunc: spamFilter.Filter, + aggregator: NewEventAggregator( + cacheSize, + EventAggregatorByReasonFunc, + EventAggregatorByReasonMessageFunc, + defaultAggregateMaxEvents, + defaultAggregateIntervalInSeconds, + clock), + + logger: newEventLogger(cacheSize, clock), + } +} + +// EventCorrelate filters, aggregates, counts, and de-duplicates all incoming events +func (c *EventCorrelator) EventCorrelate(newEvent *v1.Event) (*EventCorrelateResult, error) { + if newEvent == nil { + return nil, fmt.Errorf("event is nil") + } + aggregateEvent, ckey := c.aggregator.EventAggregate(newEvent) + observedEvent, patch, err := c.logger.eventObserve(aggregateEvent, ckey) + if c.filterFunc(observedEvent) { + return &EventCorrelateResult{Skip: true}, nil + } + return &EventCorrelateResult{Event: observedEvent, Patch: patch}, err +} + +// UpdateState based on the latest observed state from server +func (c *EventCorrelator) UpdateState(event *v1.Event) { + c.logger.updateState(event) +} diff --git a/vendor/k8s.io/client-go/tools/record/fake.go b/vendor/k8s.io/client-go/tools/record/fake.go new file mode 100644 index 000000000..6e031daaf --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/fake.go @@ -0,0 +1,58 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package record + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// FakeRecorder is used as a fake during tests. It is thread safe. It is usable +// when created manually and not by NewFakeRecorder, however all events may be +// thrown away in this case. +type FakeRecorder struct { + Events chan string +} + +func (f *FakeRecorder) Event(object runtime.Object, eventtype, reason, message string) { + if f.Events != nil { + f.Events <- fmt.Sprintf("%s %s %s", eventtype, reason, message) + } +} + +func (f *FakeRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { + if f.Events != nil { + f.Events <- fmt.Sprintf(eventtype+" "+reason+" "+messageFmt, args...) + } +} + +func (f *FakeRecorder) PastEventf(object runtime.Object, timestamp metav1.Time, eventtype, reason, messageFmt string, args ...interface{}) { +} + +func (f *FakeRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { + f.Eventf(object, eventtype, reason, messageFmt, args) +} + +// NewFakeRecorder creates new fake event recorder with event channel with +// buffer of given size. +func NewFakeRecorder(bufferSize int) *FakeRecorder { + return &FakeRecorder{ + Events: make(chan string, bufferSize), + } +} diff --git a/vendor/knative.dev/pkg/LICENSE b/vendor/knative.dev/pkg/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/knative.dev/pkg/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/knative/pkg/metrics/OWNERS b/vendor/knative.dev/pkg/apis/OWNERS similarity index 77% rename from vendor/github.com/knative/pkg/metrics/OWNERS rename to vendor/knative.dev/pkg/apis/OWNERS index e3999aaaf..a25420ebc 100644 --- a/vendor/github.com/knative/pkg/metrics/OWNERS +++ b/vendor/knative.dev/pkg/apis/OWNERS @@ -1,5 +1,4 @@ # The OWNERS file is used by prow to automatically merge approved PRs. 
approvers: -- mdemirhan -- yanweiguo +- apis-approvers diff --git a/vendor/github.com/knative/pkg/apis/condition_set.go b/vendor/knative.dev/pkg/apis/condition_set.go similarity index 100% rename from vendor/github.com/knative/pkg/apis/condition_set.go rename to vendor/knative.dev/pkg/apis/condition_set.go diff --git a/vendor/github.com/knative/pkg/apis/condition_types.go b/vendor/knative.dev/pkg/apis/condition_types.go similarity index 99% rename from vendor/github.com/knative/pkg/apis/condition_types.go rename to vendor/knative.dev/pkg/apis/condition_types.go index 8f5603c0f..31c566807 100644 --- a/vendor/github.com/knative/pkg/apis/condition_types.go +++ b/vendor/knative.dev/pkg/apis/condition_types.go @@ -55,6 +55,7 @@ const ( // Conditions defines a readiness condition for a Knative resource. // See: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#typical-status-properties // +k8s:deepcopy-gen=true +// +k8s:openapi-gen=true type Condition struct { // Type of condition. // +required diff --git a/vendor/knative.dev/pkg/apis/contexts.go b/vendor/knative.dev/pkg/apis/contexts.go new file mode 100644 index 000000000..287761e16 --- /dev/null +++ b/vendor/knative.dev/pkg/apis/contexts.go @@ -0,0 +1,182 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package apis + +import ( + "context" + + authenticationv1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// This is attached to contexts passed to webhook interfaces when +// the receiver being validated is being created. +type inCreateKey struct{} + +// WithinCreate is used to note that the webhook is calling within +// the context of a Create operation. +func WithinCreate(ctx context.Context) context.Context { + return context.WithValue(ctx, inCreateKey{}, struct{}{}) +} + +// IsInCreate checks whether the context is a Create. +func IsInCreate(ctx context.Context) bool { + return ctx.Value(inCreateKey{}) != nil +} + +// This is attached to contexts passed to webhook interfaces when +// the receiver being validated is being updated. +type inUpdateKey struct{} + +type updatePayload struct { + base interface{} + subresource string +} + +// WithinUpdate is used to note that the webhook is calling within +// the context of a Update operation. +func WithinUpdate(ctx context.Context, base interface{}) context.Context { + return context.WithValue(ctx, inUpdateKey{}, &updatePayload{ + base: base, + }) +} + +// WithinSubResourceUpdate is used to note that the webhook is calling within +// the context of a Update operation on a subresource. +func WithinSubResourceUpdate(ctx context.Context, base interface{}, sr string) context.Context { + return context.WithValue(ctx, inUpdateKey{}, &updatePayload{ + base: base, + subresource: sr, + }) +} + +// IsInUpdate checks whether the context is an Update. +func IsInUpdate(ctx context.Context) bool { + return ctx.Value(inUpdateKey{}) != nil +} + +// IsInStatusUpdate checks whether the context is an Update. 
+func IsInStatusUpdate(ctx context.Context) bool { + value := ctx.Value(inUpdateKey{}) + if value == nil { + return false + } + up := value.(*updatePayload) + return up.subresource == "status" +} + +// GetBaseline returns the baseline of the update, or nil when we +// are not within an update context. +func GetBaseline(ctx context.Context) interface{} { + value := ctx.Value(inUpdateKey{}) + if value == nil { + return nil + } + return value.(*updatePayload).base +} + +// This is attached to contexts passed to webhook interfaces when +// the receiver being validated is being created. +type userInfoKey struct{} + +// WithUserInfo is used to note that the webhook is calling within +// the context of a Create operation. +func WithUserInfo(ctx context.Context, ui *authenticationv1.UserInfo) context.Context { + return context.WithValue(ctx, userInfoKey{}, ui) +} + +// GetUserInfo accesses the UserInfo attached to the webhook context. +func GetUserInfo(ctx context.Context) *authenticationv1.UserInfo { + if ui, ok := ctx.Value(userInfoKey{}).(*authenticationv1.UserInfo); ok { + return ui + } + return nil +} + +// This is attached to contexts as they are passed down through a resource +// being validated or defaulted to signal the ObjectMeta of the enclosing +// resource. +type parentMetaKey struct{} + +// WithinParent attaches the ObjectMeta of the resource enclosing the +// nested resources we are validating. This is intended for use with +// interfaces like apis.Defaultable and apis.Validatable. +func WithinParent(ctx context.Context, om metav1.ObjectMeta) context.Context { + return context.WithValue(ctx, parentMetaKey{}, om) +} + +// ParentMeta accesses the ObjectMeta of the enclosing parent resource +// from the context. See WithinParent for how to attach the parent's +// ObjectMeta to the context. 
+func ParentMeta(ctx context.Context) metav1.ObjectMeta { + if om, ok := ctx.Value(parentMetaKey{}).(metav1.ObjectMeta); ok { + return om + } + return metav1.ObjectMeta{} +} + +// This is attached to contexts as they are passed down through a resource +// being validated or defaulted to signal that we are within a Spec. +type inSpec struct{} + +// WithinSpec notes on the context that further validation or defaulting +// is within the context of a Spec. This is intended for use with +// interfaces like apis.Defaultable and apis.Validatable. +func WithinSpec(ctx context.Context) context.Context { + return context.WithValue(ctx, inSpec{}, struct{}{}) +} + +// IsInSpec returns whether the context of validation or defaulting is +// the Spec of the parent resource. +func IsInSpec(ctx context.Context) bool { + return ctx.Value(inSpec{}) != nil +} + +// This is attached to contexts as they are passed down through a resource +// being validated or defaulted to signal that we are within a Status. +type inStatus struct{} + +// WithinStatus notes on the context that further validation or defaulting +// is within the context of a Status. This is intended for use with +// interfaces like apis.Defaultable and apis.Validatable. +func WithinStatus(ctx context.Context) context.Context { + return context.WithValue(ctx, inStatus{}, struct{}{}) +} + +// IsInStatus returns whether the context of validation or defaulting is +// the Status of the parent resource. +func IsInStatus(ctx context.Context) bool { + return ctx.Value(inStatus{}) != nil +} + +// This is attached to contexts as they are passed down through a resource +// being validated to direct them to disallow deprecated fields. +type disallowDeprecated struct{} + +// DisallowDeprecated notes on the context that further validation +// should disallow the used of deprecated fields. This may be used +// to ensure that new paths through resources to a common type don't +// allow the mistakes of old versions to be introduced. 
+func DisallowDeprecated(ctx context.Context) context.Context { + return context.WithValue(ctx, disallowDeprecated{}, struct{}{}) +} + +// IsDeprecatedAllowed checks the context to see whether deprecated fields +// are allowed. +func IsDeprecatedAllowed(ctx context.Context) bool { + return ctx.Value(disallowDeprecated{}) == nil +} diff --git a/vendor/knative.dev/pkg/apis/deprecated.go b/vendor/knative.dev/pkg/apis/deprecated.go new file mode 100644 index 000000000..c73f5be7c --- /dev/null +++ b/vendor/knative.dev/pkg/apis/deprecated.go @@ -0,0 +1,180 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apis + +import ( + "context" + "reflect" + "strings" +) + +const ( + deprecatedPrefix = "Deprecated" +) + +// CheckDeprecated checks whether the provided named deprecated fields +// are set in a context where deprecation is disallowed. +// This is a shallow check. +func CheckDeprecated(ctx context.Context, obj interface{}) *FieldError { + return CheckDeprecatedUpdate(ctx, obj, nil) +} + +// CheckDeprecated checks whether the provided named deprecated fields +// are set in a context where deprecation is disallowed. +// This is a json shallow check. We will recursively check inlined structs. 
+func CheckDeprecatedUpdate(ctx context.Context, obj interface{}, original interface{}) *FieldError { + if IsDeprecatedAllowed(ctx) { + return nil + } + + var errs *FieldError + objFields, objInlined := getPrefixedNamedFieldValues(deprecatedPrefix, obj) + + if nonZero(reflect.ValueOf(original)) { + originalFields, originalInlined := getPrefixedNamedFieldValues(deprecatedPrefix, original) + + // We only have to walk obj Fields because the assumption is that obj + // and original are of the same type. + for name, value := range objFields { + if nonZero(value) { + if differ(originalFields[name], value) { + // Not allowed to update the value. + errs = errs.Also(ErrDisallowedUpdateDeprecatedFields(name)) + } + } + } + // Look for deprecated inlined updates. + if len(objInlined) > 0 { + for name, value := range objInlined { + errs = errs.Also(CheckDeprecatedUpdate(ctx, value, originalInlined[name])) + } + } + } else { + for name, value := range objFields { + if nonZero(value) { + // Not allowed to set the value. + errs = errs.Also(ErrDisallowedFields(name)) + } + } + // Look for deprecated inlined creates. + if len(objInlined) > 0 { + for _, value := range objInlined { + errs = errs.Also(CheckDeprecated(ctx, value)) + } + } + } + return errs +} + +func getPrefixedNamedFieldValues(prefix string, obj interface{}) (map[string]reflect.Value, map[string]interface{}) { + fields := make(map[string]reflect.Value, 0) + inlined := make(map[string]interface{}, 0) + + objValue := reflect.Indirect(reflect.ValueOf(obj)) + + // If res is not valid or a struct, don't even try to use it. + if !objValue.IsValid() || objValue.Kind() != reflect.Struct { + return fields, inlined + } + + for i := 0; i < objValue.NumField(); i++ { + tf := objValue.Type().Field(i) + if v := objValue.Field(i); v.IsValid() { + jTag := tf.Tag.Get("json") + if strings.HasPrefix(tf.Name, prefix) { + name := strings.Split(jTag, ",")[0] + if name == "" { + // Default to field name in go struct if no json name. 
+ name = tf.Name + } + fields[name] = v + } else if jTag == ",inline" { + inlined[tf.Name] = getInterface(v) + } + } + } + return fields, inlined +} + +// getInterface returns the interface value of the reflected object. +func getInterface(a reflect.Value) interface{} { + switch a.Kind() { + case reflect.Ptr: + if a.IsNil() { + return nil + } + return a.Elem().Interface() + + case reflect.Map, reflect.Slice, reflect.Array: + return a.Elem().Interface() + + // This is a nil interface{} type. + case reflect.Invalid: + return nil + + default: + return a.Interface() + } +} + +// nonZero returns true if a is nil or reflect.Zero. +func nonZero(a reflect.Value) bool { + switch a.Kind() { + case reflect.Ptr: + if a.IsNil() { + return false + } + return nonZero(a.Elem()) + + case reflect.Map, reflect.Slice, reflect.Array: + if a.IsNil() { + return false + } + return true + + // This is a nil interface{} type. + case reflect.Invalid: + return false + + default: + if reflect.DeepEqual(a.Interface(), reflect.Zero(a.Type()).Interface()) { + return false + } + return true + } +} + +// differ returns true if a != b +func differ(a, b reflect.Value) bool { + if a.Kind() != b.Kind() { + return true + } + + switch a.Kind() { + case reflect.Ptr: + if a.IsNil() || b.IsNil() { + return a.IsNil() != b.IsNil() + } + return differ(a.Elem(), b.Elem()) + + default: + if reflect.DeepEqual(a.Interface(), b.Interface()) { + return false + } + return true + } +} diff --git a/vendor/github.com/knative/pkg/apis/doc.go b/vendor/knative.dev/pkg/apis/doc.go similarity index 100% rename from vendor/github.com/knative/pkg/apis/doc.go rename to vendor/knative.dev/pkg/apis/doc.go diff --git a/vendor/github.com/knative/pkg/kmeta/OWNERS b/vendor/knative.dev/pkg/apis/duck/OWNERS similarity index 76% rename from vendor/github.com/knative/pkg/kmeta/OWNERS rename to vendor/knative.dev/pkg/apis/duck/OWNERS index be0d3f1b4..ad4d83c51 100644 --- a/vendor/github.com/knative/pkg/kmeta/OWNERS +++ 
b/vendor/knative.dev/pkg/apis/duck/OWNERS @@ -1,5 +1,4 @@ # The OWNERS file is used by prow to automatically merge approved PRs. approvers: -- mattmoor -- jonjohnsonjr +- apis-duck-approvers diff --git a/vendor/github.com/knative/pkg/apis/duck/cached.go b/vendor/knative.dev/pkg/apis/duck/cached.go similarity index 100% rename from vendor/github.com/knative/pkg/apis/duck/cached.go rename to vendor/knative.dev/pkg/apis/duck/cached.go diff --git a/vendor/github.com/knative/pkg/apis/duck/doc.go b/vendor/knative.dev/pkg/apis/duck/doc.go similarity index 100% rename from vendor/github.com/knative/pkg/apis/duck/doc.go rename to vendor/knative.dev/pkg/apis/duck/doc.go diff --git a/vendor/github.com/knative/pkg/apis/duck/enqueue.go b/vendor/knative.dev/pkg/apis/duck/enqueue.go similarity index 100% rename from vendor/github.com/knative/pkg/apis/duck/enqueue.go rename to vendor/knative.dev/pkg/apis/duck/enqueue.go diff --git a/vendor/github.com/knative/pkg/apis/duck/interface.go b/vendor/knative.dev/pkg/apis/duck/interface.go similarity index 100% rename from vendor/github.com/knative/pkg/apis/duck/interface.go rename to vendor/knative.dev/pkg/apis/duck/interface.go diff --git a/vendor/github.com/knative/pkg/apis/duck/patch.go b/vendor/knative.dev/pkg/apis/duck/patch.go similarity index 100% rename from vendor/github.com/knative/pkg/apis/duck/patch.go rename to vendor/knative.dev/pkg/apis/duck/patch.go diff --git a/vendor/github.com/knative/pkg/apis/duck/proxy.go b/vendor/knative.dev/pkg/apis/duck/proxy.go similarity index 100% rename from vendor/github.com/knative/pkg/apis/duck/proxy.go rename to vendor/knative.dev/pkg/apis/duck/proxy.go diff --git a/vendor/github.com/knative/pkg/apis/duck/register.go b/vendor/knative.dev/pkg/apis/duck/register.go similarity index 100% rename from vendor/github.com/knative/pkg/apis/duck/register.go rename to vendor/knative.dev/pkg/apis/duck/register.go diff --git a/vendor/github.com/knative/pkg/apis/duck/typed.go 
b/vendor/knative.dev/pkg/apis/duck/typed.go similarity index 75% rename from vendor/github.com/knative/pkg/apis/duck/typed.go rename to vendor/knative.dev/pkg/apis/duck/typed.go index 9d29c1e0b..bf992c49f 100644 --- a/vendor/github.com/knative/pkg/apis/duck/typed.go +++ b/vendor/knative.dev/pkg/apis/duck/typed.go @@ -29,7 +29,7 @@ import ( "k8s.io/client-go/dynamic" "k8s.io/client-go/tools/cache" - "github.com/knative/pkg/apis" + "knative.dev/pkg/apis" ) // TypedInformerFactory implements InformerFactory such that the elements @@ -94,44 +94,36 @@ func AsStructuredWatcher(wf cache.WatchFunc, obj runtime.Object) cache.WatchFunc go func() { defer close(structuredCh) unstructuredCh := uw.ResultChan() - for { - select { - case ue, ok := <-unstructuredCh: - if !ok { - // Channel is closed. - return - } + for ue := range unstructuredCh { + unstructuredObj, ok := ue.Object.(*unstructured.Unstructured) + if !ok { + // If it isn't an unstructured object, then forward the + // event as-is. This is likely to happen when the event's + // Type is an Error. + structuredCh <- ue + continue + } + structuredObj := obj.DeepCopyObject() - unstructuredObj, ok := ue.Object.(*unstructured.Unstructured) - if !ok { - // If it isn't an unstructured object, then forward the - // event as-is. This is likely to happen when the event's - // Type is an Error. - structuredCh <- ue - continue - } - structuredObj := obj.DeepCopyObject() - - err := FromUnstructured(unstructuredObj, structuredObj) - if err != nil { - // Pass back an error indicating that the object we got - // was invalid. - structuredCh <- watch.Event{ - Type: watch.Error, - Object: &metav1.Status{ - Status: metav1.StatusFailure, - Code: http.StatusUnprocessableEntity, - Reason: metav1.StatusReasonInvalid, - Message: err.Error(), - }, - } - continue - } - // Send the structured event. 
+ err := FromUnstructured(unstructuredObj, structuredObj) + if err != nil { + // Pass back an error indicating that the object we got + // was invalid. structuredCh <- watch.Event{ - Type: ue.Type, - Object: structuredObj, + Type: watch.Error, + Object: &metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusUnprocessableEntity, + Reason: metav1.StatusReasonInvalid, + Message: err.Error(), + }, } + continue + } + // Send the structured event. + structuredCh <- watch.Event{ + Type: ue.Type, + Object: structuredObj, } } }() diff --git a/vendor/github.com/knative/pkg/apis/duck/unstructured.go b/vendor/knative.dev/pkg/apis/duck/unstructured.go similarity index 100% rename from vendor/github.com/knative/pkg/apis/duck/unstructured.go rename to vendor/knative.dev/pkg/apis/duck/unstructured.go diff --git a/vendor/knative.dev/pkg/apis/duck/v1beta1/addressable_types.go b/vendor/knative.dev/pkg/apis/duck/v1beta1/addressable_types.go new file mode 100644 index 000000000..817585a3b --- /dev/null +++ b/vendor/knative.dev/pkg/apis/duck/v1beta1/addressable_types.go @@ -0,0 +1,97 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + "knative.dev/pkg/apis" + "knative.dev/pkg/apis/duck" +) + +// Addressable provides a generic mechanism for a custom resource +// definition to indicate a destination for message delivery. 
+ +// Addressable is the schema for the destination information. This is +// typically stored in the object's `status`, as this information may +// be generated by the controller. +type Addressable struct { + URL *apis.URL `json:"url,omitempty"` +} + +// Addressable is an Implementable "duck type". +var _ duck.Implementable = (*Addressable)(nil) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AddressableType is a skeleton type wrapping Addressable in the manner we expect +// resource writers defining compatible resources to embed it. We will +// typically use this type to deserialize Addressable ObjectReferences and +// access the Addressable data. This is not a real resource. +type AddressableType struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Status AddressStatus `json:"status"` +} + +// AddressStatus shows how we expect folks to embed Addressable in +// their Status field. +type AddressStatus struct { + Address *Addressable `json:"address,omitempty"` +} + +var ( + // Verify AddressableType resources meet duck contracts. 
+ _ duck.Populatable = (*AddressableType)(nil) + _ apis.Listable = (*AddressableType)(nil) +) + +// GetFullType implements duck.Implementable +func (*Addressable) GetFullType() duck.Populatable { + return &AddressableType{} +} + +// Populate implements duck.Populatable +func (t *AddressableType) Populate() { + t.Status = AddressStatus{ + &Addressable{ + // Populate ALL fields + URL: &apis.URL{ + Scheme: "http", + Host: "foo.com", + }, + }, + } +} + +// GetListType implements apis.Listable +func (*AddressableType) GetListType() runtime.Object { + return &AddressableTypeList{} +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AddressableTypeList is a list of AddressableType resources +type AddressableTypeList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []AddressableType `json:"items"` +} diff --git a/vendor/github.com/knative/pkg/apis/duck/v1beta1/doc.go b/vendor/knative.dev/pkg/apis/duck/v1beta1/doc.go similarity index 100% rename from vendor/github.com/knative/pkg/apis/duck/v1beta1/doc.go rename to vendor/knative.dev/pkg/apis/duck/v1beta1/doc.go diff --git a/vendor/github.com/knative/pkg/apis/duck/v1beta1/register.go b/vendor/knative.dev/pkg/apis/duck/v1beta1/register.go similarity index 94% rename from vendor/github.com/knative/pkg/apis/duck/v1beta1/register.go rename to vendor/knative.dev/pkg/apis/duck/v1beta1/register.go index b3e38c404..ca8388ad4 100644 --- a/vendor/github.com/knative/pkg/apis/duck/v1beta1/register.go +++ b/vendor/knative.dev/pkg/apis/duck/v1beta1/register.go @@ -17,10 +17,10 @@ limitations under the License. 
package v1beta1 import ( - "github.com/knative/pkg/apis/duck" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "knative.dev/pkg/apis/duck" ) // SchemeGroupVersion is group version used to register these objects @@ -47,6 +47,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { SchemeGroupVersion, &KResource{}, (&KResource{}).GetListType(), + &AddressableType{}, + (&AddressableType{}).GetListType(), ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/vendor/github.com/knative/pkg/apis/duck/v1beta1/status_types.go b/vendor/knative.dev/pkg/apis/duck/v1beta1/status_types.go similarity index 98% rename from vendor/github.com/knative/pkg/apis/duck/v1beta1/status_types.go rename to vendor/knative.dev/pkg/apis/duck/v1beta1/status_types.go index b999737ae..ef6804dea 100644 --- a/vendor/github.com/knative/pkg/apis/duck/v1beta1/status_types.go +++ b/vendor/knative.dev/pkg/apis/duck/v1beta1/status_types.go @@ -24,8 +24,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "github.com/knative/pkg/apis" - "github.com/knative/pkg/apis/duck" + "knative.dev/pkg/apis" + "knative.dev/pkg/apis/duck" ) // Conditions is a simple wrapper around apis.Conditions to implement duck.Implementable. @@ -36,6 +36,7 @@ var _ duck.Implementable = (*Conditions)(nil) // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=true // KResource is a skeleton type wrapping Conditions in the manner we expect // resource writers defining compatible resources to embed it. 
We will diff --git a/vendor/github.com/knative/pkg/apis/duck/v1beta1/zz_generated.deepcopy.go b/vendor/knative.dev/pkg/apis/duck/v1beta1/zz_generated.deepcopy.go similarity index 54% rename from vendor/github.com/knative/pkg/apis/duck/v1beta1/zz_generated.deepcopy.go rename to vendor/knative.dev/pkg/apis/duck/v1beta1/zz_generated.deepcopy.go index bdbc0471c..329aabb64 100644 --- a/vendor/github.com/knative/pkg/apis/duck/v1beta1/zz_generated.deepcopy.go +++ b/vendor/knative.dev/pkg/apis/duck/v1beta1/zz_generated.deepcopy.go @@ -22,8 +22,111 @@ package v1beta1 import ( runtime "k8s.io/apimachinery/pkg/runtime" + apis "knative.dev/pkg/apis" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AddressStatus) DeepCopyInto(out *AddressStatus) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(Addressable) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressStatus. +func (in *AddressStatus) DeepCopy() *AddressStatus { + if in == nil { + return nil + } + out := new(AddressStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Addressable) DeepCopyInto(out *Addressable) { + *out = *in + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(apis.URL) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Addressable. +func (in *Addressable) DeepCopy() *Addressable { + if in == nil { + return nil + } + out := new(Addressable) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AddressableType) DeepCopyInto(out *AddressableType) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressableType. +func (in *AddressableType) DeepCopy() *AddressableType { + if in == nil { + return nil + } + out := new(AddressableType) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AddressableType) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AddressableTypeList) DeepCopyInto(out *AddressableTypeList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AddressableType, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressableTypeList. +func (in *AddressableTypeList) DeepCopy() *AddressableTypeList { + if in == nil { + return nil + } + out := new(AddressableTypeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AddressableTypeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in Conditions) DeepCopyInto(out *Conditions) { { diff --git a/vendor/github.com/knative/pkg/apis/duck/verify.go b/vendor/knative.dev/pkg/apis/duck/verify.go similarity index 99% rename from vendor/github.com/knative/pkg/apis/duck/verify.go rename to vendor/knative.dev/pkg/apis/duck/verify.go index eb6bdebf4..236a392c7 100644 --- a/vendor/github.com/knative/pkg/apis/duck/verify.go +++ b/vendor/knative.dev/pkg/apis/duck/verify.go @@ -20,7 +20,7 @@ import ( "encoding/json" "fmt" - "github.com/knative/pkg/kmp" + "knative.dev/pkg/kmp" ) // Implementable is implemented by the Fooable duck type that consumers diff --git a/vendor/github.com/knative/pkg/apis/field_error.go b/vendor/knative.dev/pkg/apis/field_error.go similarity index 91% rename from vendor/github.com/knative/pkg/apis/field_error.go rename to vendor/knative.dev/pkg/apis/field_error.go index da498281b..59b281d6e 100644 --- a/vendor/github.com/knative/pkg/apis/field_error.go +++ b/vendor/knative.dev/pkg/apis/field_error.go @@ -20,6 +20,8 @@ import ( "fmt" "sort" "strings" + + "knative.dev/pkg/kmp" ) // CurrentField is a constant to supply as a fieldPath for when there is @@ -300,6 +302,15 @@ func ErrDisallowedFields(fieldPaths ...string) *FieldError { } } +// ErrDisallowedUpdateDeprecatedFields is a variadic helper method for +// constructing a FieldError for updating of deprecated fields. +func ErrDisallowedUpdateDeprecatedFields(fieldPaths ...string) *FieldError { + return &FieldError{ + Message: "must not update deprecated field(s)", + Paths: fieldPaths, + } +} + // ErrInvalidArrayValue constructs a FieldError for a repetetive `field` // at `index` that has received an invalid string value. func ErrInvalidArrayValue(value interface{}, field string, index int) *FieldError { @@ -351,3 +362,18 @@ func ErrOutOfBoundsValue(value, lower, upper interface{}, fieldPath string) *Fie Paths: []string{fieldPath}, } } + +// CheckDisallowedFields compares the request object against a masked request object. 
Fields +// that are set in the request object that are unset in the mask are reported back as disallowed fields. If +// there is an error comparing the two objects FieldError of "Internal Error" is returned. +func CheckDisallowedFields(request, maskedRequest interface{}) *FieldError { + if disallowed, err := kmp.CompareSetFields(request, maskedRequest); err != nil { + return &FieldError{ + Message: fmt.Sprintf("Internal Error"), + Paths: []string{CurrentField}, + } + } else if len(disallowed) > 0 { + return ErrDisallowedFields(disallowed...) + } + return nil +} diff --git a/vendor/github.com/knative/pkg/apis/interfaces.go b/vendor/knative.dev/pkg/apis/interfaces.go similarity index 90% rename from vendor/github.com/knative/pkg/apis/interfaces.go rename to vendor/knative.dev/pkg/apis/interfaces.go index 601d083dd..6b6c772d7 100644 --- a/vendor/github.com/knative/pkg/apis/interfaces.go +++ b/vendor/knative.dev/pkg/apis/interfaces.go @@ -66,3 +66,10 @@ type Listable interface { // The webhook functionality for this has been turned down, which is why this // interface is empty. type Annotatable interface{} + +// HasSpec indicates that a particular type has a specification information +// and that information is retrievable. +type HasSpec interface { + // GetUntypedSpec returns the spec of the resource. 
+ GetUntypedSpec() interface{} +} diff --git a/vendor/github.com/knative/pkg/apis/kind2resource.go b/vendor/knative.dev/pkg/apis/kind2resource.go similarity index 100% rename from vendor/github.com/knative/pkg/apis/kind2resource.go rename to vendor/knative.dev/pkg/apis/kind2resource.go diff --git a/vendor/github.com/knative/pkg/apis/metadata_validation.go b/vendor/knative.dev/pkg/apis/metadata_validation.go similarity index 100% rename from vendor/github.com/knative/pkg/apis/metadata_validation.go rename to vendor/knative.dev/pkg/apis/metadata_validation.go diff --git a/vendor/knative.dev/pkg/apis/url.go b/vendor/knative.dev/pkg/apis/url.go new file mode 100644 index 000000000..c0402016f --- /dev/null +++ b/vendor/knative.dev/pkg/apis/url.go @@ -0,0 +1,73 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apis + +import ( + "encoding/json" + "fmt" + "net/url" +) + +// URL is an alias of url.URL. +// It has custom json marshal methods that enable it to be used in K8s CRDs +// such that the CRD resource will have the URL but operator code can can work with url.URL struct +type URL url.URL + +// ParseURL attempts to parse the given string as a URL. +func ParseURL(u string) (*URL, error) { + if u == "" { + return nil, nil + } + pu, err := url.Parse(u) + if err != nil { + return nil, err + } + return (*URL)(pu), nil +} + +// MarshalJSON implements a custom json marshal method used when this type is +// marshaled using json.Marshal. 
+// json.Marshaler impl +func (u URL) MarshalJSON() ([]byte, error) { + b := fmt.Sprintf("%q", u.String()) + return []byte(b), nil +} + +// UnmarshalJSON implements the json unmarshal method used when this type is +// unmarsheled using json.Unmarshal. +// json.Unmarshaler impl +func (u *URL) UnmarshalJSON(b []byte) error { + var ref string + if err := json.Unmarshal(b, &ref); err != nil { + return err + } + r, err := ParseURL(ref) + if err != nil { + return err + } + *u = *r + return nil +} + +// String returns the full string representation of the URL. +func (u *URL) String() string { + if u == nil { + return "" + } + uu := url.URL(*u) + return uu.String() +} diff --git a/vendor/github.com/knative/pkg/apis/volatile_time.go b/vendor/knative.dev/pkg/apis/volatile_time.go similarity index 97% rename from vendor/github.com/knative/pkg/apis/volatile_time.go rename to vendor/knative.dev/pkg/apis/volatile_time.go index 3d2daa277..48d790d92 100644 --- a/vendor/github.com/knative/pkg/apis/volatile_time.go +++ b/vendor/knative.dev/pkg/apis/volatile_time.go @@ -22,6 +22,7 @@ import ( ) // VolatileTime wraps metav1.Time +// +k8s:openapi-gen=true type VolatileTime struct { Inner metav1.Time } diff --git a/vendor/github.com/knative/pkg/apis/zz_generated.deepcopy.go b/vendor/knative.dev/pkg/apis/zz_generated.deepcopy.go similarity index 84% rename from vendor/github.com/knative/pkg/apis/zz_generated.deepcopy.go rename to vendor/knative.dev/pkg/apis/zz_generated.deepcopy.go index f32afcd0f..be670d4a8 100644 --- a/vendor/github.com/knative/pkg/apis/zz_generated.deepcopy.go +++ b/vendor/knative.dev/pkg/apis/zz_generated.deepcopy.go @@ -20,6 +20,10 @@ limitations under the License. package apis +import ( + url "net/url" +) + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Condition) DeepCopyInto(out *Condition) { *out = *in @@ -87,6 +91,27 @@ func (in *FieldError) DeepCopy() *FieldError { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *URL) DeepCopyInto(out *URL) { + *out = *in + if in.User != nil { + in, out := &in.User, &out.User + *out = new(url.Userinfo) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new URL. +func (in *URL) DeepCopy() *URL { + if in == nil { + return nil + } + out := new(URL) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolatileTime) DeepCopyInto(out *VolatileTime) { *out = *in diff --git a/vendor/github.com/knative/pkg/changeset/commit.go b/vendor/knative.dev/pkg/changeset/commit.go similarity index 100% rename from vendor/github.com/knative/pkg/changeset/commit.go rename to vendor/knative.dev/pkg/changeset/commit.go diff --git a/vendor/github.com/knative/pkg/changeset/doc.go b/vendor/knative.dev/pkg/changeset/doc.go similarity index 100% rename from vendor/github.com/knative/pkg/changeset/doc.go rename to vendor/knative.dev/pkg/changeset/doc.go diff --git a/vendor/github.com/knative/pkg/apis/duck/OWNERS b/vendor/knative.dev/pkg/configmap/OWNERS similarity index 75% rename from vendor/github.com/knative/pkg/apis/duck/OWNERS rename to vendor/knative.dev/pkg/configmap/OWNERS index 0f60a8a2f..2480fc6d4 100644 --- a/vendor/github.com/knative/pkg/apis/duck/OWNERS +++ b/vendor/knative.dev/pkg/configmap/OWNERS @@ -1,5 +1,4 @@ # The OWNERS file is used by prow to automatically merge approved PRs. 
approvers: -- mattmoor -- vaikas-google +- configmap-approvers diff --git a/vendor/github.com/knative/pkg/configmap/doc.go b/vendor/knative.dev/pkg/configmap/doc.go similarity index 100% rename from vendor/github.com/knative/pkg/configmap/doc.go rename to vendor/knative.dev/pkg/configmap/doc.go diff --git a/vendor/github.com/knative/pkg/configmap/filter.go b/vendor/knative.dev/pkg/configmap/filter.go similarity index 100% rename from vendor/github.com/knative/pkg/configmap/filter.go rename to vendor/knative.dev/pkg/configmap/filter.go diff --git a/vendor/github.com/knative/pkg/configmap/informed_watcher.go b/vendor/knative.dev/pkg/configmap/informed_watcher.go similarity index 58% rename from vendor/github.com/knative/pkg/configmap/informed_watcher.go rename to vendor/knative.dev/pkg/configmap/informed_watcher.go index 2e8b492e5..5903d59d7 100644 --- a/vendor/github.com/knative/pkg/configmap/informed_watcher.go +++ b/vendor/knative.dev/pkg/configmap/informed_watcher.go @@ -8,7 +8,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software -istributed under the License is istributed on an "AS IS" BASIS, +distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
@@ -21,6 +21,7 @@ import ( "time" corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" informers "k8s.io/client-go/informers" corev1informers "k8s.io/client-go/informers/core/v1" "k8s.io/client-go/kubernetes" @@ -34,7 +35,7 @@ func NewDefaultWatcher(kc kubernetes.Interface, namespace string) *InformedWatch return NewInformedWatcher(kc, namespace) } -// NewInformedWatcherFromFactory watchers a Kubernetes namespace for configmap changs +// NewInformedWatcherFromFactory watches a Kubernetes namespace for configmap changes. func NewInformedWatcherFromFactory(sif informers.SharedInformerFactory, namespace string) *InformedWatcher { return &InformedWatcher{ sif: sif, @@ -42,10 +43,11 @@ func NewInformedWatcherFromFactory(sif informers.SharedInformerFactory, namespac ManualWatcher: ManualWatcher{ Namespace: namespace, }, + defaults: make(map[string]*corev1.ConfigMap), } } -// NewInformedWatcher watchers a Kubernetes namespace for configmap changs +// NewInformedWatcher watches a Kubernetes namespace for configmap changes. func NewInformedWatcher(kc kubernetes.Interface, namespace string) *InformedWatcher { return NewInformedWatcherFromFactory(informers.NewSharedInformerFactoryWithOptions( kc, @@ -61,24 +63,58 @@ type InformedWatcher struct { informer corev1informers.ConfigMapInformer started bool + // defaults are the default ConfigMaps to use if the real ones do not exist or are deleted. + defaults map[string]*corev1.ConfigMap + // Embedding this struct allows us to reuse the logic // of registering and notifying observers. This simplifies the - // InformedWatcher to just setting up the Kubernetes informer + // InformedWatcher to just setting up the Kubernetes informer. ManualWatcher } // Asserts that InformedWatcher implements Watcher. var _ Watcher = (*InformedWatcher)(nil) -// Start implements Watcher +// Asserts that InformedWatcher implements DefaultingWatcher. 
+var _ DefaultingWatcher = (*InformedWatcher)(nil) + +// WatchWithDefault implements DefaultingWatcher. +func (i *InformedWatcher) WatchWithDefault(cm corev1.ConfigMap, o Observer) { + i.defaults[cm.Name] = &cm + + i.m.Lock() + started := i.started + i.m.Unlock() + if started { + // TODO make both Watch and WatchWithDefault work after the InformedWatcher has started. + // This likely entails changing this to `o(&cm)` and having Watch check started, if it has + // started, then ensuring i.informer.Lister().ConfigMaps(i.Namespace).Get(cmName) exists and + // calling this observer on it. It may require changing Watch and WatchWithDefault to return + // an error. + panic("cannot WatchWithDefault after the InformedWatcher has started") + } + + i.Watch(cm.Name, o) +} + +// Start implements Watcher. func (i *InformedWatcher) Start(stopCh <-chan struct{}) error { + // Pretend that all the defaulted ConfigMaps were just created. This is done before we start + // the informer to ensure that if a defaulted ConfigMap does exist, then the real value is + // processed after the default one. 
+ for k := range i.observers { + if def, ok := i.defaults[k]; ok { + i.addConfigMapEvent(def) + } + } + if err := i.registerCallbackAndStartInformer(stopCh); err != nil { return err } // Wait until it has been synced (WITHOUT holing the mutex, so callbacks happen) if ok := cache.WaitForCacheSync(stopCh, i.informer.Informer().HasSynced); !ok { - return errors.New("Error waiting for ConfigMap informer to sync.") + return errors.New("error waiting for ConfigMap informer to sync") } return i.checkObservedResourcesExist() @@ -88,16 +124,17 @@ func (i *InformedWatcher) registerCallbackAndStartInformer(stopCh <-chan struct{ i.m.Lock() defer i.m.Unlock() if i.started { - return errors.New("Watcher already started!") + return errors.New("watcher already started") } i.started = true i.informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: i.addConfigMapEvent, UpdateFunc: i.updateConfigMapEvent, + DeleteFunc: i.deleteConfigMapEvent, }) - // Start the shared informer factory (non-blocking) + // Start the shared informer factory (non-blocking). i.sif.Start(stopCh) return nil } @@ -109,6 +146,12 @@ func (i *InformedWatcher) checkObservedResourcesExist() error { for k := range i.observers { _, err := i.informer.Lister().ConfigMaps(i.Namespace).Get(k) if err != nil { + if k8serrors.IsNotFound(err) { + if _, ok := i.defaults[k]; ok { + // It is defaulted, so it is OK that it doesn't exist. + continue + } + } return err } } @@ -124,3 +167,11 @@ func (i *InformedWatcher) updateConfigMapEvent(old, new interface{}) { configMap := new.(*corev1.ConfigMap) i.OnChange(configMap) } + +func (i *InformedWatcher) deleteConfigMapEvent(obj interface{}) { + configMap := obj.(*corev1.ConfigMap) + if def, ok := i.defaults[configMap.Name]; ok { + i.OnChange(def) + } + // If there is no default value, then don't do anything. 
+} diff --git a/vendor/github.com/knative/pkg/configmap/load.go b/vendor/knative.dev/pkg/configmap/load.go similarity index 100% rename from vendor/github.com/knative/pkg/configmap/load.go rename to vendor/knative.dev/pkg/configmap/load.go diff --git a/vendor/github.com/knative/pkg/configmap/manual_watcher.go b/vendor/knative.dev/pkg/configmap/manual_watcher.go similarity index 99% rename from vendor/github.com/knative/pkg/configmap/manual_watcher.go rename to vendor/knative.dev/pkg/configmap/manual_watcher.go index b14c5ac7b..759641058 100644 --- a/vendor/github.com/knative/pkg/configmap/manual_watcher.go +++ b/vendor/knative.dev/pkg/configmap/manual_watcher.go @@ -29,7 +29,6 @@ type ManualWatcher struct { // Guards mutations to defaultImpl fields m sync.Mutex - started bool observers map[string][]Observer } diff --git a/vendor/github.com/knative/pkg/configmap/static_watcher.go b/vendor/knative.dev/pkg/configmap/static_watcher.go similarity index 100% rename from vendor/github.com/knative/pkg/configmap/static_watcher.go rename to vendor/knative.dev/pkg/configmap/static_watcher.go diff --git a/vendor/github.com/knative/pkg/configmap/store.go b/vendor/knative.dev/pkg/configmap/store.go similarity index 96% rename from vendor/github.com/knative/pkg/configmap/store.go rename to vendor/knative.dev/pkg/configmap/store.go index 62cab4324..452830eb6 100644 --- a/vendor/github.com/knative/pkg/configmap/store.go +++ b/vendor/knative.dev/pkg/configmap/store.go @@ -23,6 +23,9 @@ import ( corev1 "k8s.io/api/core/v1" ) +// ExampleKey signifies a given example configuration in a ConfigMap. +const ExampleKey = "_example" + // Logger is the interface that UntypedStore expects its logger to conform to. // UntypedStore will log when updates succeed or fail. 
type Logger interface { @@ -44,7 +47,7 @@ type Constructors map[string]interface{} // An UntypedStore is a responsible for storing and // constructing configs from Kubernetes ConfigMaps // -// WatchConfigs should be used with a configmap,Watcher +// WatchConfigs should be used with a configmap.Watcher // in order for this store to remain up to date type UntypedStore struct { name string diff --git a/vendor/github.com/knative/pkg/configmap/watcher.go b/vendor/knative.dev/pkg/configmap/watcher.go similarity index 70% rename from vendor/github.com/knative/pkg/configmap/watcher.go rename to vendor/knative.dev/pkg/configmap/watcher.go index d248bbd73..71a18f495 100644 --- a/vendor/github.com/knative/pkg/configmap/watcher.go +++ b/vendor/knative.dev/pkg/configmap/watcher.go @@ -26,7 +26,7 @@ import ( // contents). type Observer func(*corev1.ConfigMap) -// Watcher defined the interface that a configmap implementation must implement. +// Watcher defines the interface that a configmap implementation must implement. type Watcher interface { // Watch is called to register a callback to be notified when a named ConfigMap changes. Watch(string, Observer) @@ -36,3 +36,14 @@ type Watcher interface { // initial state of the ConfigMaps they are watching. Start(<-chan struct{}) error } + +// DefaultingWatcher is similar to Watcher, but if a ConfigMap is absent, then a code provided +// default will be used. +type DefaultingWatcher interface { + Watcher + + // WatchWithDefault is called to register a callback to be notified when a named ConfigMap + // changes. The provided default value is always observed before any real ConfigMap with that + // name is. If the real ConfigMap with that name is deleted, then the default value is observed. 
+ WatchWithDefault(cm corev1.ConfigMap, o Observer) +} diff --git a/vendor/knative.dev/pkg/controller/OWNERS b/vendor/knative.dev/pkg/controller/OWNERS new file mode 100644 index 000000000..afa22257a --- /dev/null +++ b/vendor/knative.dev/pkg/controller/OWNERS @@ -0,0 +1,4 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- controller-approvers diff --git a/vendor/github.com/knative/pkg/controller/controller.go b/vendor/knative.dev/pkg/controller/controller.go similarity index 76% rename from vendor/github.com/knative/pkg/controller/controller.go rename to vendor/knative.dev/pkg/controller/controller.go index 5131f3e70..355435525 100644 --- a/vendor/github.com/knative/pkg/controller/controller.go +++ b/vendor/knative.dev/pkg/controller/controller.go @@ -22,21 +22,28 @@ import ( "sync" "time" + "github.com/google/uuid" + "go.uber.org/zap" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" - "github.com/knative/pkg/kmeta" - "github.com/knative/pkg/logging" - "github.com/knative/pkg/logging/logkey" + "knative.dev/pkg/kmeta" + "knative.dev/pkg/logging" + "knative.dev/pkg/logging/logkey" ) const ( falseString = "false" trueString = "true" + + // DefaultResyncPeriod is the default duration that is used when no + // resync period is associated with a controllers initialization context. + DefaultResyncPeriod = 10 * time.Hour ) var ( @@ -63,6 +70,17 @@ func PassNew(f func(interface{})) func(interface{}, interface{}) { } } +// HandleAll wraps the provided handler function into a cache.ResourceEventHandler +// that sends all events to the given handler. For Updates, only the new object +// is forwarded. 
+func HandleAll(h func(interface{})) cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: h, + UpdateFunc: PassNew(h), + DeleteFunc: h, + } +} + // Filter makes it simple to create FilterFunc's for use with // cache.FilteringResourceEventHandler that filter based on the // schema.GroupVersionKind of the controlling resources. @@ -78,6 +96,18 @@ func Filter(gvk schema.GroupVersionKind) func(obj interface{}) bool { } } +// FilterWithNameAndNamespace makes it simple to create FilterFunc's for use with +// cache.FilteringResourceEventHandler that filter based on a namespace and a name. +func FilterWithNameAndNamespace(namespace, name string) func(obj interface{}) bool { + return func(obj interface{}) bool { + if object, ok := obj.(metav1.Object); ok { + return name == object.GetName() && + namespace == object.GetNamespace() + } + return false + } +} + // Impl is our core controller implementation. It handles queuing and feeding work // from the queue to an implementation of Reconciler. type Impl struct { @@ -105,7 +135,11 @@ type Impl struct { // NewImpl instantiates an instance of our controller that will feed work to the // provided Reconciler as it is enqueued. -func NewImpl(r Reconciler, logger *zap.SugaredLogger, workQueueName string, reporter StatsReporter) *Impl { +func NewImpl(r Reconciler, logger *zap.SugaredLogger, workQueueName string) *Impl { + return NewImplWithStats(r, logger, workQueueName, MustNewStatsReporter(workQueueName, logger)) +} + +func NewImplWithStats(r Reconciler, logger *zap.SugaredLogger, workQueueName string, reporter StatsReporter) *Impl { return &Impl{ Reconciler: r, WorkQueue: workqueue.NewNamedRateLimitingQueue( @@ -117,6 +151,17 @@ func NewImpl(r Reconciler, logger *zap.SugaredLogger, workQueueName string, repo } } +// EnqueueAfter takes a resource, converts it into a namespace/name string, +// and passes it to EnqueueKey. 
+func (c *Impl) EnqueueAfter(obj interface{}, after time.Duration) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + if err != nil { + c.logger.Errorw("Enqueue", zap.Error(err)) + return + } + c.EnqueueKeyAfter(key, after) +} + // Enqueue takes a resource, converts it into a namespace/name string, // and passes it to EnqueueKey. func (c *Impl) Enqueue(obj interface{}) { @@ -212,6 +257,12 @@ func (c *Impl) EnqueueKey(key string) { c.WorkQueue.Add(key) } +// EnqueueKeyAfter takes a namespace/name string and schedules its execution in +// the work queue after given delay. +func (c *Impl) EnqueueKeyAfter(key string, delay time.Duration) { + c.WorkQueue.AddAfter(key, delay) +} + // Run starts the controller's worker threads, the number of which is threadiness. // It then blocks until stopCh is closed, at which point it shuts down its internal // work queue and waits for workers to finish processing their current work items. @@ -271,7 +322,7 @@ func (c *Impl) processNextWorkItem() bool { // Embed the key into the logger and attach that to the context we pass // to the Reconciler. - logger := c.logger.With(zap.String(logkey.Key, key)) + logger := c.logger.With(zap.String(logkey.TraceId, uuid.New().String()), zap.String(logkey.Key, key)) ctx := logging.WithLogger(context.TODO(), logger) // Run Reconcile, passing it the namespace/name string of the @@ -377,3 +428,47 @@ func StartAll(stopCh <-chan struct{}, controllers ...*Impl) { } wg.Wait() } + +// This is attached to contexts passed to controller constructors to associate +// a resync period. +type resyncPeriodKey struct{} + +// WithResyncPeriod associates the given resync period with the given context in +// the context that is returned. +func WithResyncPeriod(ctx context.Context, resync time.Duration) context.Context { + return context.WithValue(ctx, resyncPeriodKey{}, resync) +} + +// GetResyncPeriod returns the resync period associated with the given context. 
+// When none is specified a default resync period is used. +func GetResyncPeriod(ctx context.Context) time.Duration { + rp := ctx.Value(resyncPeriodKey{}) + if rp == nil { + return DefaultResyncPeriod + } + return rp.(time.Duration) +} + +// GetTrackerLease fetches the tracker lease from the controller context. +func GetTrackerLease(ctx context.Context) time.Duration { + return 3 * GetResyncPeriod(ctx) +} + +// erKey is used to associate record.EventRecorders with contexts. +type erKey struct{} + +// WithEventRecorder attaches the given record.EventRecorder to the provided context +// in the returned context. +func WithEventRecorder(ctx context.Context, er record.EventRecorder) context.Context { + return context.WithValue(ctx, erKey{}, er) +} + +// GetEventRecorder attempts to look up the record.EventRecorder on a given context. +// It may return null if none is found. +func GetEventRecorder(ctx context.Context) record.EventRecorder { + untyped := ctx.Value(erKey{}) + if untyped == nil { + return nil + } + return untyped.(record.EventRecorder) +} diff --git a/vendor/github.com/knative/pkg/controller/helper.go b/vendor/knative.dev/pkg/controller/helper.go similarity index 70% rename from vendor/github.com/knative/pkg/controller/helper.go rename to vendor/knative.dev/pkg/controller/helper.go index 5e74aaa06..b326cc5b1 100644 --- a/vendor/github.com/knative/pkg/controller/helper.go +++ b/vendor/knative.dev/pkg/controller/helper.go @@ -19,8 +19,9 @@ package controller import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/tools/cache" - "github.com/knative/pkg/kmeta" + "knative.dev/pkg/kmeta" ) type Callback func(interface{}) @@ -50,3 +51,17 @@ func EnsureTypeMeta(f Callback, gvk schema.GroupVersionKind) Callback { f(copy) } } + +// SendGlobalUpdates triggers an update event for all objects from the +// passed SharedInformer. 
+// +// Since this is triggered not by a real update of these objects +// themselves, we have no way of knowing the change to these objects +// if any, so we call handler.OnUpdate(obj, obj) for all of them +// regardless if they have changes or not. +func SendGlobalUpdates(si cache.SharedInformer, handler cache.ResourceEventHandler) { + store := si.GetStore() + for _, obj := range store.List() { + handler.OnUpdate(obj, obj) + } +} diff --git a/vendor/github.com/knative/pkg/controller/stats_reporter.go b/vendor/knative.dev/pkg/controller/stats_reporter.go similarity index 91% rename from vendor/github.com/knative/pkg/controller/stats_reporter.go rename to vendor/knative.dev/pkg/controller/stats_reporter.go index 2b0cc8231..60a9157c5 100644 --- a/vendor/github.com/knative/pkg/controller/stats_reporter.go +++ b/vendor/knative.dev/pkg/controller/stats_reporter.go @@ -21,10 +21,11 @@ import ( "errors" "time" - "github.com/knative/pkg/metrics" "go.opencensus.io/stats" "go.opencensus.io/stats/view" "go.opencensus.io/tag" + "go.uber.org/zap" + "knative.dev/pkg/metrics" ) var ( @@ -103,6 +104,16 @@ func NewStatsReporter(reconciler string) (StatsReporter, error) { return &reporter{reconciler: reconciler, globalCtx: ctx}, nil } +// MustNewStatsReporter creates a new instance of StatsReporter. +// Logs fatally if creation fails. 
+func MustNewStatsReporter(reconciler string, logger *zap.SugaredLogger) StatsReporter { + stats, err := NewStatsReporter(reconciler) + if err != nil { + logger.Fatalw("Failed to initialize the stats reporter", zap.Error(err)) + } + return stats +} + // ReportQueueDepth reports the queue depth metric func (r *reporter) ReportQueueDepth(v int64) error { if r.globalCtx == nil { diff --git a/vendor/github.com/knative/pkg/configmap/OWNERS b/vendor/knative.dev/pkg/injection/OWNERS similarity index 88% rename from vendor/github.com/knative/pkg/configmap/OWNERS rename to vendor/knative.dev/pkg/injection/OWNERS index 5fe763218..dda47512a 100644 --- a/vendor/github.com/knative/pkg/configmap/OWNERS +++ b/vendor/knative.dev/pkg/injection/OWNERS @@ -2,4 +2,4 @@ approvers: - mattmoor -- mdemirhan +- n3wscott diff --git a/vendor/knative.dev/pkg/injection/README.md b/vendor/knative.dev/pkg/injection/README.md new file mode 100644 index 000000000..8f9712b71 --- /dev/null +++ b/vendor/knative.dev/pkg/injection/README.md @@ -0,0 +1,218 @@ +# Knative Dependency Injection + +This library supports the production of controller processes with minimal +boilerplate outside of the reconciler implementation. + +## Building Controllers + +To adopt this model of controller construction, implementations should start +with the following controller constructor: + +```go +import ( + "context" + + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + "knative.dev/pkg/logging" +) + +func NewController(ctx context.Context, cmw configmap.Watcher) *controller.Impl { + logger := logging.FromContext(ctx) + + // TODO(you): Access informers + + c := &Reconciler{ + // TODO(you): Pass listers, clients, and other stuff. + } + impl := controller.NewImpl(c, logger, "NameOfController") + + // TODO(you): Set up event handlers. 
+ + return impl +} +``` + +## Consuming Informers + +Knative controllers use "informers" to set up the various event hooks needed to +queue work, and pass the "listers" fed by the informers' caches to the nested +"Reconciler" for accessing objects. + +Our controller constructor is passed a `context.Context` onto which we inject +any informers we access. The accessors for these informers are in little stub +libraries, which we have hand rolled for Kubernetes (more on how to generate +these below). + +```go +import ( + // These are how you access a client or informer off of the "ctx" passed + // to set up the controller. + "knative.dev/pkg/injection/clients/kubeclient" + svcinformer "knative.dev/pkg/injection/informers/kubeinformers/corev1/service" + + // Other imports ... +) + +func NewController(ctx context.Context, cmw configmap.Watcher) *controller.Impl { + logger := logging.FromContext(ctx) + + // Access informers + svcInformer := svcinformer.Get(ctx) + + c := &Reconciler{ + // Pass the lister and client to the Reconciler. + Client: kubeclient.Get(ctx), + ServiceLister: svcInformer.Lister(), + } + impl := controller.NewImpl(c, logger, "NameOfController") + + // Set up event handlers. + svcInformer.Informer().AddEventHandler(...) + + return impl +} + +``` + +> How it works: by importing the accessor for a client or informer we link it +> and trigger the `init()` method for its package to run at startup. Each of +> these libraries registers themselves similar to our `init()` and controller +> processes can leverage this to setup and inject all of the registered things +> onto a context to pass to your `NewController()`. + +## Testing Controllers + +Similar to `injection.Default`, we also have `injection.Fake`. While linking the +normal accessors sets up the former, linking their fakes set up the latter. + +``` +import ( + "testing" + + // Link the fakes for any informers our controller accesses. 
+ _ "knative.dev/pkg/injection/informers/kubeinformers/corev1/service/fake" + + "k8s.io/client-go/rest" + "knative.dev/pkg/injection" + logtesting "knative.dev/pkg/logging/testing" +) + +func TestFoo(t *testing.T) { + ctx := logtesting.TestContextWithLogger(t) + + // Setup a context from all of the injected fakes. + ctx, _ = injection.Fake.SetupInformers(ctx, &rest.Config{}) + cmw := configmap.NewStaticWatcher(...) + ctrl := NewController(ctx, cmw) + + // Test the controller process. +} +``` + +The fake clients also support manually setting up contexts seeded with objects: + +``` +import ( + "testing" + + fakekubeclient "knative.dev/pkg/injection/clients/kubeclient/fake" + + "k8s.io/client-go/rest" + "knative.dev/pkg/injection" + logtesting "knative.dev/pkg/logging/testing" +) + +func TestFoo(t *testing.T) { + ctx := logtesting.TestContextWithLogger(t) + + objs := []runtime.Object{ + // Some list of initial objects in the client. + } + + ctx, kubeClient := fakekubeclient.With(ctx, objs...) + + // The fake clients returned by our library are the actual fake type, + // which enables us to access test-specific methods, e.g. + kubeClient.AppendReactor(...) + + c := &Reconciler{ + Client: kubeClient, + } + + // Test the reconciler... +} +``` + +## Starting controllers + +All we do is import the controller packages and pass their constructors along +with a component name (single word) to our shared main. Then our shared main +method sets it all up and runs our controllers. + +```go +package main + +import ( + // The set of controllers this process will run. + "github.com/knative/foo/pkg/reconciler/bar" + "github.com/knative/baz/pkg/reconciler/blah" + + // This defines the shared main for injected controllers. + "knative.dev/pkg/injection/sharedmain" +) + +func main() { + sharedmain.Main("componentname", + bar.NewController, + blah.NewController, + ) +} + +``` + +## Generating Injection Stubs. 
+ +To make generating stubs simple, we have harnessed the Kubernetes +code-generation tooling to produce `injection-gen`. Similar to how you might +ordinarily run the other `foo-gen` processed: + +```shell +CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${REPO_ROOT}; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)} + +${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \ + github.com/knative/sample-controller/pkg/client github.com/knative/sample-controller/pkg/apis \ + "samples:v1alpha1" \ + --go-header-file ${REPO_ROOT}/hack/boilerplate/boilerplate.go.txt +``` + +To run `injection-gen` you run the following (replacing the import path and api +group): + +```shell + +KNATIVE_CODEGEN_PKG=${KNATIVE_CODEGEN_PKG:-$(cd ${REPO_ROOT}; ls -d -1 ./vendor/knative.dev/pkg 2>/dev/null || echo ../pkg)} + +${KNATIVE_CODEGEN_PKG}/hack/generate-knative.sh "injection" \ + github.com/knative/sample-controller/pkg/client github.com/knative/sample-controller/pkg/apis \ + "samples:v1alpha1" \ + --go-header-file ${REPO_ROOT}/hack/boilerplate/boilerplate.go.txt + +``` + +To ensure the appropriate tooling is vendored, add the following to +`Gopkg.toml`: + +```toml +required = [ + "knative.dev/pkg/codegen/cmd/injection-gen", +] + +# .. Constraints + +# Keeps things like the generate-knative.sh script +[[prune.project]] + name = "knative.dev/pkg" + unused-packages = false + non-go = false +``` diff --git a/vendor/knative.dev/pkg/injection/clients.go b/vendor/knative.dev/pkg/injection/clients.go new file mode 100644 index 000000000..5c464924c --- /dev/null +++ b/vendor/knative.dev/pkg/injection/clients.go @@ -0,0 +1,42 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package injection + +import ( + "context" + + "k8s.io/client-go/rest" +) + +// ClientInjector holds the type of a callback that attaches a particular +// client type to a context. +type ClientInjector func(context.Context, *rest.Config) context.Context + +func (i *impl) RegisterClient(ci ClientInjector) { + i.m.Lock() + defer i.m.Unlock() + + i.clients = append(i.clients, ci) +} + +func (i *impl) GetClients() []ClientInjector { + i.m.RLock() + defer i.m.RUnlock() + + // Copy the slice before returning. + return append(i.clients[:0:0], i.clients...) +} diff --git a/vendor/knative.dev/pkg/injection/clients/kubeclient/fake/fake.go b/vendor/knative.dev/pkg/injection/clients/kubeclient/fake/fake.go new file mode 100644 index 000000000..269839bc6 --- /dev/null +++ b/vendor/knative.dev/pkg/injection/clients/kubeclient/fake/fake.go @@ -0,0 +1,53 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/rest" + + "knative.dev/pkg/injection" + "knative.dev/pkg/injection/clients/kubeclient" + "knative.dev/pkg/logging" +) + +func init() { + injection.Fake.RegisterClient(withClient) +} + +func withClient(ctx context.Context, cfg *rest.Config) context.Context { + ctx, _ = With(ctx) + return ctx +} + +func With(ctx context.Context, objects ...runtime.Object) (context.Context, *fake.Clientset) { + cs := fake.NewSimpleClientset(objects...) + return context.WithValue(ctx, kubeclient.Key{}, cs), cs +} + +// Get extracts the Kubernetes client from the context. +func Get(ctx context.Context) *fake.Clientset { + untyped := ctx.Value(kubeclient.Key{}) + if untyped == nil { + logging.FromContext(ctx).Panicf( + "Unable to fetch %T from context.", (*fake.Clientset)(nil)) + } + return untyped.(*fake.Clientset) +} diff --git a/vendor/knative.dev/pkg/injection/clients/kubeclient/kubeclient.go b/vendor/knative.dev/pkg/injection/clients/kubeclient/kubeclient.go new file mode 100644 index 000000000..6b60663f7 --- /dev/null +++ b/vendor/knative.dev/pkg/injection/clients/kubeclient/kubeclient.go @@ -0,0 +1,49 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubeclient + +import ( + "context" + + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + + "knative.dev/pkg/injection" + "knative.dev/pkg/logging" +) + +func init() { + injection.Default.RegisterClient(withClient) +} + +// Key is used as the key for associating information +// with a context.Context. +type Key struct{} + +func withClient(ctx context.Context, cfg *rest.Config) context.Context { + return context.WithValue(ctx, Key{}, kubernetes.NewForConfigOrDie(cfg)) +} + +// Get extracts the Kubernetes client from the context. +func Get(ctx context.Context) kubernetes.Interface { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panicf( + "Unable to fetch %T from context.", (kubernetes.Interface)(nil)) + } + return untyped.(kubernetes.Interface) +} diff --git a/vendor/knative.dev/pkg/injection/doc.go b/vendor/knative.dev/pkg/injection/doc.go new file mode 100644 index 000000000..c9da28918 --- /dev/null +++ b/vendor/knative.dev/pkg/injection/doc.go @@ -0,0 +1,68 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package injection defines the mechanisms through which clients, informers +// and shared informer factories are injected into a shared controller binary +// implementation. +// +// There are two primary contexts where the usage of the injection package is +// interesting. 
The first is in the context of implementations of +// `controller.Reconciler` being wrapped in a `*controller.Impl`: +// +// import ( +// // Simply linking this triggers the injection of the informer, which links +// // the factory triggering its injection, and which links the client, +// // triggering its injection. All you need to know is that it works :) +// deployinformer "knative.dev/pkg/injection/informers/kubeinformers/appsv1/deployment" +// "knative.dev/pkg/injection" +// ) +// +// func NewController(ctx context.Context) *controller.Impl { +// deploymentInformer := deployinformer.Get(ctx) +// // Pass deploymentInformer.Lister() to Reconciler +// ... +// // Set up events on deploymentInformer.Informer() +// ... +// } +// +// Then in `package main` the entire controller process can be set up via: +// +// package main +// +// import ( +// // The set of controllers this controller process runs. +// // Linking these will register their transitive dependencies, after +// // which the shared main can set up the rest. +// "github.com/knative/foo/pkg/reconciler/matt" +// "github.com/knative/foo/pkg/reconciler/scott" +// "github.com/knative/foo/pkg/reconciler/ville" +// "github.com/knative/foo/pkg/reconciler/dave" +// +// // This defines the shared main for injected controllers. +// "knative.dev/pkg/injection/sharedmain" +// ) +// +// func main() { +// sharedmain.Main("mycomponent", +// // We pass in the list of controllers to construct, and that's it! +// // If we forget to add this, go will complain about the unused import. 
+// matt.NewController, +// scott.NewController, +// ville.NewController, +// dave.NewController, +// ) +// } +package injection diff --git a/vendor/knative.dev/pkg/injection/factories.go b/vendor/knative.dev/pkg/injection/factories.go new file mode 100644 index 000000000..fc913612a --- /dev/null +++ b/vendor/knative.dev/pkg/injection/factories.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package injection + +import ( + "context" +) + +// InformerFactoryInjector holds the type of a callback that attaches a particular +// factory type to a context. +type InformerFactoryInjector func(context.Context) context.Context + +func (i *impl) RegisterInformerFactory(ifi InformerFactoryInjector) { + i.m.Lock() + defer i.m.Unlock() + + i.factories = append(i.factories, ifi) +} + +func (i *impl) GetInformerFactories() []InformerFactoryInjector { + i.m.RLock() + defer i.m.RUnlock() + + // Copy the slice before returning. + return append(i.factories[:0:0], i.factories...) +} diff --git a/vendor/knative.dev/pkg/injection/informers.go b/vendor/knative.dev/pkg/injection/informers.go new file mode 100644 index 000000000..2da9ad2c5 --- /dev/null +++ b/vendor/knative.dev/pkg/injection/informers.go @@ -0,0 +1,68 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package injection + +import ( + "context" + + "k8s.io/client-go/rest" + + "knative.dev/pkg/controller" +) + +// InformerInjector holds the type of a callback that attaches a particular +// informer type to a context. +type InformerInjector func(context.Context) (context.Context, controller.Informer) + +func (i *impl) RegisterInformer(ii InformerInjector) { + i.m.Lock() + defer i.m.Unlock() + + i.informers = append(i.informers, ii) +} + +func (i *impl) GetInformers() []InformerInjector { + i.m.RLock() + defer i.m.RUnlock() + + // Copy the slice before returning. + return append(i.informers[:0:0], i.informers...) +} + +func (i *impl) SetupInformers(ctx context.Context, cfg *rest.Config) (context.Context, []controller.Informer) { + // Based on the reconcilers we have linked, build up a set of clients and inject + // them onto the context. + for _, ci := range i.GetClients() { + ctx = ci(ctx, cfg) + } + + // Based on the reconcilers we have linked, build up a set of informer factories + // and inject them onto the context. + for _, ifi := range i.GetInformerFactories() { + ctx = ifi(ctx) + } + + // Based on the reconcilers we have linked, build up a set of informers + // and inject them onto the context. 
+ var inf controller.Informer + informers := make([]controller.Informer, 0, len(i.GetInformers())) + for _, ii := range i.GetInformers() { + ctx, inf = ii(ctx) + informers = append(informers, inf) + } + return ctx, informers +} diff --git a/vendor/knative.dev/pkg/injection/informers/kubeinformers/corev1/pod/fake/fake.go b/vendor/knative.dev/pkg/injection/informers/kubeinformers/corev1/pod/fake/fake.go new file mode 100644 index 000000000..36d9b7eba --- /dev/null +++ b/vendor/knative.dev/pkg/injection/informers/kubeinformers/corev1/pod/fake/fake.go @@ -0,0 +1,38 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + "context" + + "knative.dev/pkg/controller" + "knative.dev/pkg/injection" + "knative.dev/pkg/injection/informers/kubeinformers/corev1/pod" + "knative.dev/pkg/injection/informers/kubeinformers/factory/fake" +) + +var Get = pod.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Core().V1().Pods() + return context.WithValue(ctx, pod.Key{}, inf), inf.Informer() +} diff --git a/vendor/knative.dev/pkg/injection/informers/kubeinformers/corev1/pod/pod.go b/vendor/knative.dev/pkg/injection/informers/kubeinformers/corev1/pod/pod.go new file mode 100644 index 000000000..a454764e6 --- /dev/null +++ b/vendor/knative.dev/pkg/injection/informers/kubeinformers/corev1/pod/pod.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pod + +import ( + "context" + + corev1 "k8s.io/client-go/informers/core/v1" + + "knative.dev/pkg/controller" + "knative.dev/pkg/injection" + "knative.dev/pkg/injection/informers/kubeinformers/factory" + "knative.dev/pkg/logging" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used as the key for associating information +// with a context.Context. 
+type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Core().V1().Pods() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the Kubernetes Pod informer from the context. +func Get(ctx context.Context) corev1.PodInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panicf( + "Unable to fetch %T from context.", (corev1.PodInformer)(nil)) + } + return untyped.(corev1.PodInformer) +} diff --git a/vendor/knative.dev/pkg/injection/informers/kubeinformers/factory/factory.go b/vendor/knative.dev/pkg/injection/informers/kubeinformers/factory/factory.go new file mode 100644 index 000000000..a72408be2 --- /dev/null +++ b/vendor/knative.dev/pkg/injection/informers/kubeinformers/factory/factory.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package factory + +import ( + "context" + + "k8s.io/client-go/informers" + + "knative.dev/pkg/controller" + "knative.dev/pkg/injection" + "knative.dev/pkg/injection/clients/kubeclient" + "knative.dev/pkg/logging" +) + +func init() { + injection.Default.RegisterInformerFactory(withInformerFactory) +} + +// Key is used as the key for associating information +// with a context.Context. 
+type Key struct{} + +func withInformerFactory(ctx context.Context) context.Context { + kc := kubeclient.Get(ctx) + return context.WithValue(ctx, Key{}, + informers.NewSharedInformerFactory(kc, controller.GetResyncPeriod(ctx))) +} + +// Get extracts the Kubernetes InformerFactory from the context. +func Get(ctx context.Context) informers.SharedInformerFactory { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panicf( + "Unable to fetch %T from context.", (informers.SharedInformerFactory)(nil)) + } + return untyped.(informers.SharedInformerFactory) +} diff --git a/vendor/knative.dev/pkg/injection/informers/kubeinformers/factory/fake/fake.go b/vendor/knative.dev/pkg/injection/informers/kubeinformers/factory/fake/fake.go new file mode 100644 index 000000000..1432c6b68 --- /dev/null +++ b/vendor/knative.dev/pkg/injection/informers/kubeinformers/factory/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + "context" + + "k8s.io/client-go/informers" + + "knative.dev/pkg/controller" + "knative.dev/pkg/injection" + "knative.dev/pkg/injection/clients/kubeclient/fake" + "knative.dev/pkg/injection/informers/kubeinformers/factory" +) + +var Get = factory.Get + +func init() { + injection.Fake.RegisterInformerFactory(withInformerFactory) +} + +func withInformerFactory(ctx context.Context) context.Context { + kc := fake.Get(ctx) + return context.WithValue(ctx, factory.Key{}, + informers.NewSharedInformerFactory(kc, controller.GetResyncPeriod(ctx))) +} diff --git a/vendor/knative.dev/pkg/injection/interface.go b/vendor/knative.dev/pkg/injection/interface.go new file mode 100644 index 000000000..bf6e7eef9 --- /dev/null +++ b/vendor/knative.dev/pkg/injection/interface.go @@ -0,0 +1,84 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package injection + +import ( + "context" + "sync" + + "k8s.io/client-go/rest" + + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" +) + +// Interface is the interface for interacting with injection +// implementations, such as our Default and Fake below. +type Interface interface { + // RegisterClient registers a new injector callback for associating + // a new client with a context. + RegisterClient(ClientInjector) + + // GetClients fetches all of the registered client injectors. 
+ GetClients() []ClientInjector + + // RegisterInformerFactory registers a new injector callback for associating + // a new informer factory with a context. + RegisterInformerFactory(InformerFactoryInjector) + + // GetInformerFactories fetches all of the registered informer factory injectors. + GetInformerFactories() []InformerFactoryInjector + + // RegisterInformer registers a new injector callback for associating + // a new informer with a context. + RegisterInformer(InformerInjector) + + // GetInformers fetches all of the registered informer injectors. + GetInformers() []InformerInjector + + // SetupInformers runs all of the injectors against a context, starting with + // the clients and the given rest.Config. The resulting context is returned + // along with a list of the .Informer() for each of the injected informers, + // which is suitable for passing to controller.StartInformers(). + // This does not setup or start any controllers. + SetupInformers(context.Context, *rest.Config) (context.Context, []controller.Informer) +} + +type ControllerConstructor func(context.Context, configmap.Watcher) *controller.Impl + +var ( + // Check that impl implements Interface + _ Interface = (*impl)(nil) + + // Default is the injection interface with which informers should register + // to make themselves available to the controller process when reconcilers + // are being run for real. + Default Interface = &impl{} + + // Fake is the injection interface with which informers should register + // to make themselves available to the controller process when it is being + // unit tested. 
+ Fake Interface = &impl{} +) + +type impl struct { + m sync.RWMutex + + clients []ClientInjector + factories []InformerFactoryInjector + informers []InformerInjector +} diff --git a/vendor/knative.dev/pkg/kmeta/OWNERS b/vendor/knative.dev/pkg/kmeta/OWNERS new file mode 100644 index 000000000..29b0d9f25 --- /dev/null +++ b/vendor/knative.dev/pkg/kmeta/OWNERS @@ -0,0 +1,4 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- kmeta-approvers diff --git a/vendor/github.com/knative/pkg/kmeta/accessor.go b/vendor/knative.dev/pkg/kmeta/accessor.go similarity index 100% rename from vendor/github.com/knative/pkg/kmeta/accessor.go rename to vendor/knative.dev/pkg/kmeta/accessor.go diff --git a/vendor/github.com/knative/pkg/kmeta/doc.go b/vendor/knative.dev/pkg/kmeta/doc.go similarity index 100% rename from vendor/github.com/knative/pkg/kmeta/doc.go rename to vendor/knative.dev/pkg/kmeta/doc.go diff --git a/vendor/github.com/knative/pkg/kmeta/labels.go b/vendor/knative.dev/pkg/kmeta/labels.go similarity index 100% rename from vendor/github.com/knative/pkg/kmeta/labels.go rename to vendor/knative.dev/pkg/kmeta/labels.go diff --git a/vendor/knative.dev/pkg/kmeta/names.go b/vendor/knative.dev/pkg/kmeta/names.go new file mode 100644 index 000000000..1efa7108b --- /dev/null +++ b/vendor/knative.dev/pkg/kmeta/names.go @@ -0,0 +1,41 @@ +/* +copyright 2019 the knative authors + +licensed under the apache license, version 2.0 (the "license"); +you may not use this file except in compliance with the license. +you may obtain a copy of the license at + + http://www.apache.org/licenses/license-2.0 + +unless required by applicable law or agreed to in writing, software +distributed under the license is distributed on an "as is" basis, +without warranties or conditions of any kind, either express or implied. +see the license for the specific language governing permissions and +limitations under the license. 
+*/ + +package kmeta + +import ( + "crypto/md5" + "fmt" +) + +// The longest name supported by the K8s is 63. +// These constants +const ( + longest = 63 + md5Len = 32 + head = longest - md5Len +) + +// ChildName generates a name for the resource based upong the parent resource and suffix. +// If the concatenated name is longer than K8s permits the name is hashed and truncated to permit +// construction of the resource, but still keeps it unique. +func ChildName(parent, suffix string) string { + n := parent + if len(parent) > (longest - len(suffix)) { + n = fmt.Sprintf("%s%x", parent[:head-len(suffix)], md5.Sum([]byte(parent))) + } + return n + suffix +} diff --git a/vendor/github.com/knative/pkg/kmeta/owner_references.go b/vendor/knative.dev/pkg/kmeta/owner_references.go similarity index 100% rename from vendor/github.com/knative/pkg/kmeta/owner_references.go rename to vendor/knative.dev/pkg/kmeta/owner_references.go diff --git a/vendor/knative.dev/pkg/kmeta/ownerrefable_accessor.go b/vendor/knative.dev/pkg/kmeta/ownerrefable_accessor.go new file mode 100644 index 000000000..822a7ac0c --- /dev/null +++ b/vendor/knative.dev/pkg/kmeta/ownerrefable_accessor.go @@ -0,0 +1,25 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package kmeta + +// OwnerRefableAccessor is a combination of OwnerRefable interface and Accessor interface +// which inidcates that it has 1) sufficient information to produce a metav1.OwnerReference to an object, +// 2) and a collection of interfaces from metav1.TypeMeta runtime.Object and metav1.Object that Kubernetes API types +// registered with runtime.Scheme must support. +type OwnerRefableAccessor interface { + OwnerRefable + Accessor +} diff --git a/vendor/github.com/knative/pkg/kmp/diff.go b/vendor/knative.dev/pkg/kmp/diff.go similarity index 62% rename from vendor/github.com/knative/pkg/kmp/diff.go rename to vendor/knative.dev/pkg/kmp/diff.go index ef9bae39e..09c041446 100644 --- a/vendor/github.com/knative/pkg/kmp/diff.go +++ b/vendor/knative.dev/pkg/kmp/diff.go @@ -36,6 +36,8 @@ func init() { // SafeDiff wraps cmp.Diff but recovers from panics and uses custom Comparers for: // * k8s.io/apimachinery/pkg/api/resource.Quantity +// SafeDiff should be used instead of cmp.Diff in non-test code to protect the running +// process from crashing. func SafeDiff(x, y interface{}, opts ...cmp.Option) (diff string, err error) { // cmp.Diff will panic if we miss something; return error instead of crashing. defer func() { @@ -50,6 +52,10 @@ func SafeDiff(x, y interface{}, opts ...cmp.Option) (diff string, err error) { return } +// SafeEqual wraps cmp.Equal but recovers from panics and uses custom Comparers for: +// * k8s.io/apimachinery/pkg/api/resource.Quantity +// SafeEqual should be used instead of cmp.Equal in non-test code to protect the running +// process from crashing. func SafeEqual(x, y interface{}, opts ...cmp.Option) (equal bool, err error) { // cmp.Equal will panic if we miss something; return error instead of crashing. defer func() { @@ -63,3 +69,24 @@ func SafeEqual(x, y interface{}, opts ...cmp.Option) (equal bool, err error) { return } + +// CompareSetFields returns a list of field names that differ between +// x and y. 
Uses SafeEqual for comparison. +func CompareSetFields(x, y interface{}, opts ...cmp.Option) ([]string, error) { + r := new(FieldListReporter) + opts = append(opts, cmp.Reporter(r)) + _, err := SafeEqual(x, y, opts...) + return r.Fields(), err +} + +// ShortDiff returns a zero-context, unified human-readable diff. +// Uses SafeEqual for comparison. +func ShortDiff(prev, cur interface{}, opts ...cmp.Option) (string, error) { + r := new(ShortDiffReporter) + opts = append(opts, cmp.Reporter(r)) + var err error + if _, err = SafeEqual(prev, cur, opts...); err != nil { + return "", err + } + return r.Diff() +} diff --git a/vendor/github.com/knative/pkg/kmp/doc.go b/vendor/knative.dev/pkg/kmp/doc.go similarity index 100% rename from vendor/github.com/knative/pkg/kmp/doc.go rename to vendor/knative.dev/pkg/kmp/doc.go diff --git a/vendor/knative.dev/pkg/kmp/reporters.go b/vendor/knative.dev/pkg/kmp/reporters.go new file mode 100644 index 000000000..6221b16e9 --- /dev/null +++ b/vendor/knative.dev/pkg/kmp/reporters.go @@ -0,0 +1,148 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kmp + +import ( + "fmt" + "reflect" + "sort" + "strings" + + "github.com/google/go-cmp/cmp" +) + +// FieldListReporter implements the cmp.Reporter interface. It keeps +// track of the field names that differ between two structs and reports +// them through the Fields() function. 
+type FieldListReporter struct { + path cmp.Path + fieldNames []string +} + +// PushStep implements the cmp.Reporter. +func (r *FieldListReporter) PushStep(ps cmp.PathStep) { + r.path = append(r.path, ps) +} + +// fieldName returns a readable name for the field. If the field has JSON annotations it +// returns the JSON key. If the field does not have JSON annotations or the JSON annotation +// marks the field as ignored it returns the field's go name +func (r *FieldListReporter) fieldName() string { + if len(r.path) < 2 { + return r.path.Index(0).String() + } else { + fieldName := strings.TrimPrefix(r.path.Index(1).String(), ".") + // Prefer JSON name to fieldName if it exists + structField, exists := r.path.Index(0).Type().FieldByName(fieldName) + if exists { + tag := structField.Tag.Get("json") + if tag != "" && tag != "-" { + return strings.SplitN(tag, ",", 2)[0] + } + + } + return fieldName + } +} + +// Report implements the cmp.Reporter. +func (r *FieldListReporter) Report(rs cmp.Result) { + if rs.Equal() { + return + } + name := r.fieldName() + // Only append elements we don't already have. + for _, v := range r.fieldNames { + if name == v { + return + } + } + r.fieldNames = append(r.fieldNames, name) +} + +// PopStep implements cmp.Reporter. +func (r *FieldListReporter) PopStep() { + r.path = r.path[:len(r.path)-1] +} + +// Fields returns the field names that differed between the two +// objects after calling cmp.Equal with the FieldListReporter. Field names +// are returned in alphabetical order. +func (r *FieldListReporter) Fields() []string { + sort.Strings(r.fieldNames) + return r.fieldNames +} + +// ShortDiffReporter implements the cmp.Reporter interface. It reports +// on fields which have diffing values in a short zero-context, unified diff +// format. +type ShortDiffReporter struct { + path cmp.Path + diffs []string + err error +} + +// PushStep implements the cmp.Reporter. 
+func (r *ShortDiffReporter) PushStep(ps cmp.PathStep) { + r.path = append(r.path, ps) +} + +// Report implements the cmp.Reporter. +func (r *ShortDiffReporter) Report(rs cmp.Result) { + if rs.Equal() { + return + } + cur := r.path.Last() + vx, vy := cur.Values() + t := cur.Type() + var diff string + // Prefix struct values with the types to add clarity in output + if !vx.IsValid() && !vy.IsValid() { + r.err = fmt.Errorf("Unable to diff %+v and %+v on path %#v", vx, vy, r.path) + } else { + diff = fmt.Sprintf("%#v:\n", r.path) + if vx.IsValid() { + diff += r.diffString("-", t, vx) + } + if vy.IsValid() { + diff += r.diffString("+", t, vy) + } + } + r.diffs = append(r.diffs, diff) +} + +func (r *ShortDiffReporter) diffString(diffType string, t reflect.Type, v reflect.Value) string { + if t.Kind() == reflect.Struct { + return fmt.Sprintf("\t%s: %+v: \"%+v\"\n", diffType, t, v) + } else { + return fmt.Sprintf("\t%s: \"%+v\"\n", diffType, v) + } +} + +// PopStep implements the cmp.Reporter. +func (r *ShortDiffReporter) PopStep() { + r.path = r.path[:len(r.path)-1] +} + +// Diff returns the generated short diff for this object. +// cmp.Equal should be called before this method. +func (r *ShortDiffReporter) Diff() (string, error) { + if r.err != nil { + return "", r.err + } + return strings.Join(r.diffs, ""), nil +} diff --git a/vendor/knative.dev/pkg/logging/OWNERS b/vendor/knative.dev/pkg/logging/OWNERS new file mode 100644 index 000000000..fa4854ba0 --- /dev/null +++ b/vendor/knative.dev/pkg/logging/OWNERS @@ -0,0 +1,4 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. 
+ +approvers: +- logging-approvers diff --git a/vendor/github.com/knative/pkg/logging/config.go b/vendor/knative.dev/pkg/logging/config.go similarity index 82% rename from vendor/github.com/knative/pkg/logging/config.go rename to vendor/knative.dev/pkg/logging/config.go index 583b479c4..1a658fdc0 100644 --- a/vendor/github.com/knative/pkg/logging/config.go +++ b/vendor/knative.dev/pkg/logging/config.go @@ -20,15 +20,19 @@ import ( "encoding/json" "errors" "fmt" + "os" + "strings" "go.uber.org/zap" "go.uber.org/zap/zapcore" corev1 "k8s.io/api/core/v1" - "github.com/knative/pkg/changeset" - "github.com/knative/pkg/logging/logkey" + "knative.dev/pkg/changeset" + "knative.dev/pkg/logging/logkey" ) +const ConfigMapNameEnv = "CONFIG_LOGGING_NAME" + // NewLogger creates a logger with the supplied configuration. // In addition to the logger, it returns AtomicLevel that can // be used to change the logging level at runtime. @@ -128,7 +132,7 @@ const defaultZLC = `{ // NewConfigFromMap creates a LoggingConfig from the supplied map, // expecting the given list of components. 
-func NewConfigFromMap(data map[string]string, components ...string) (*Config, error) { +func NewConfigFromMap(data map[string]string) (*Config, error) { lc := &Config{} if zlc, ok := data["zap-logger-config"]; ok { lc.LoggingConfig = zlc @@ -137,16 +141,15 @@ func NewConfigFromMap(data map[string]string, components ...string) (*Config, er } lc.LoggingLevel = make(map[string]zapcore.Level) - for _, component := range components { - if ll := data["loglevel."+component]; len(ll) > 0 { - level, err := levelFromString(ll) - if err != nil { - return nil, err + for k, v := range data { + if component := strings.TrimPrefix(k, "loglevel."); component != k && component != "" { + if len(v) > 0 { + level, err := levelFromString(v) + if err != nil { + return nil, err + } + lc.LoggingLevel[component] = *level } - lc.LoggingLevel[component] = *level - } else { - // We default components to INFO - lc.LoggingLevel[component] = zapcore.InfoLevel } } return lc, nil @@ -154,8 +157,8 @@ func NewConfigFromMap(data map[string]string, components ...string) (*Config, er // NewConfigFromConfigMap creates a LoggingConfig from the supplied ConfigMap, // expecting the given list of components. -func NewConfigFromConfigMap(configMap *corev1.ConfigMap, components ...string) (*Config, error) { - return NewConfigFromMap(configMap.Data, components...) 
+func NewConfigFromConfigMap(configMap *corev1.ConfigMap) (*Config, error) { + return NewConfigFromMap(configMap.Data) } func levelFromString(level string) (*zapcore.Level, error) { @@ -169,19 +172,27 @@ func levelFromString(level string) (*zapcore.Level, error) { // UpdateLevelFromConfigMap returns a helper func that can be used to update the logging level // when a config map is updated func UpdateLevelFromConfigMap(logger *zap.SugaredLogger, atomicLevel zap.AtomicLevel, - levelKey string, components ...string) func(configMap *corev1.ConfigMap) { + levelKey string) func(configMap *corev1.ConfigMap) { return func(configMap *corev1.ConfigMap) { - loggingConfig, err := NewConfigFromConfigMap(configMap, components...) + loggingConfig, err := NewConfigFromConfigMap(configMap) if err != nil { logger.Errorw("Failed to parse the logging configmap. Previous config map will be used.", zap.Error(err)) return } - if level, ok := loggingConfig.LoggingLevel[levelKey]; ok { - if atomicLevel.Level() != level { - logger.Infof("Updating logging level for %v from %v to %v.", levelKey, atomicLevel.Level(), level) - atomicLevel.SetLevel(level) - } + level := loggingConfig.LoggingLevel[levelKey] + if atomicLevel.Level() != level { + logger.Infof("Updating logging level for %v from %v to %v.", levelKey, atomicLevel.Level(), level) + atomicLevel.SetLevel(level) } } } + +// ConfigMapName gets the name of the logging ConfigMap +func ConfigMapName() string { + cm := os.Getenv(ConfigMapNameEnv) + if cm == "" { + return "config-logging" + } + return cm +} diff --git a/vendor/github.com/knative/pkg/logging/logger.go b/vendor/knative.dev/pkg/logging/logger.go similarity index 100% rename from vendor/github.com/knative/pkg/logging/logger.go rename to vendor/knative.dev/pkg/logging/logger.go diff --git a/vendor/github.com/knative/pkg/logging/logkey/constants.go b/vendor/knative.dev/pkg/logging/logkey/constants.go similarity index 94% rename from 
vendor/github.com/knative/pkg/logging/logkey/constants.go rename to vendor/knative.dev/pkg/logging/logkey/constants.go index e4c62ee0d..e90abec97 100644 --- a/vendor/github.com/knative/pkg/logging/logkey/constants.go +++ b/vendor/knative.dev/pkg/logging/logkey/constants.go @@ -23,6 +23,9 @@ const ( // Key is the key (namespace/name) being reconciled. Key = "knative.dev/key" + // TraceId is the key used to track an asynchronous or long running operation. + TraceId = "knative.dev/traceid" + // Namespace is the key used for namespace in structured logs Namespace = "knative.dev/namespace" diff --git a/vendor/knative.dev/pkg/logging/testing/util.go b/vendor/knative.dev/pkg/logging/testing/util.go new file mode 100644 index 000000000..29daf9934 --- /dev/null +++ b/vendor/knative.dev/pkg/logging/testing/util.go @@ -0,0 +1,68 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package testing + +import ( + "context" + "sync" + "testing" + + "go.uber.org/zap" + "go.uber.org/zap/zaptest" + + "knative.dev/pkg/logging" +) + +var ( + loggers = make(map[string]*zap.SugaredLogger) + m sync.Mutex +) + +// TestLogger gets a logger to use in unit and end to end tests +func TestLogger(t *testing.T) *zap.SugaredLogger { + m.Lock() + defer m.Unlock() + + logger, ok := loggers[t.Name()] + + if ok { + return logger + } + + opts := zaptest.WrapOptions( + zap.AddCaller(), + zap.Development(), + ) + + logger = zaptest.NewLogger(t, opts).Sugar().Named(t.Name()) + loggers[t.Name()] = logger + + return logger +} + +// ClearAll removes all the testing loggers. +// `go test -count=X` executes runs in the same process, thus the map +// persists between the runs, but the `t` will no longer be valid and will +// cause a panic deep inside testing code. +func ClearAll() { + loggers = make(map[string]*zap.SugaredLogger) +} + +// TestContextWithLogger returns a context with a logger to be used in tests +func TestContextWithLogger(t *testing.T) context.Context { + return logging.WithLogger(context.TODO(), TestLogger(t)) +} diff --git a/vendor/github.com/knative/pkg/logging/zz_generated.deepcopy.go b/vendor/knative.dev/pkg/logging/zz_generated.deepcopy.go similarity index 100% rename from vendor/github.com/knative/pkg/logging/zz_generated.deepcopy.go rename to vendor/knative.dev/pkg/logging/zz_generated.deepcopy.go diff --git a/vendor/knative.dev/pkg/metrics/OWNERS b/vendor/knative.dev/pkg/metrics/OWNERS new file mode 100644 index 000000000..6d3966df4 --- /dev/null +++ b/vendor/knative.dev/pkg/metrics/OWNERS @@ -0,0 +1,4 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. 
+ +approvers: +- metrics-approvers diff --git a/vendor/github.com/knative/pkg/metrics/config.go b/vendor/knative.dev/pkg/metrics/config.go similarity index 89% rename from vendor/github.com/knative/pkg/metrics/config.go rename to vendor/knative.dev/pkg/metrics/config.go index e57aec821..627e064a3 100644 --- a/vendor/github.com/knative/pkg/metrics/config.go +++ b/vendor/knative.dev/pkg/metrics/config.go @@ -29,6 +29,11 @@ import ( corev1 "k8s.io/api/core/v1" ) +const ( + DomainEnv = "METRICS_DOMAIN" + ConfigMapNameEnv = "CONFIG_OBSERVABILITY_NAME" +) + // metricsBackend specifies the backend to use for metrics type metricsBackend string @@ -201,8 +206,8 @@ func getMetricsConfig(ops ExporterOptions, logger *zap.SugaredLogger) (*metricsC // UpdateExporterFromConfigMap returns a helper func that can be used to update the exporter // when a config map is updated. -// DEPRECATED. Use UpdateExporter instead. -func UpdateExporterFromConfigMap(domain string, component string, logger *zap.SugaredLogger) func(configMap *corev1.ConfigMap) { +func UpdateExporterFromConfigMap(component string, logger *zap.SugaredLogger) func(configMap *corev1.ConfigMap) { + domain := Domain() return func(configMap *corev1.ConfigMap) { UpdateExporter(ExporterOptions{ Domain: domain, @@ -226,6 +231,8 @@ func UpdateExporter(ops ExporterOptions, logger *zap.SugaredLogger) error { } if isNewExporterRequired(newConfig) { + logger.Info("Flushing the existing exporter before setting up the new exporter.") + FlushExporter() e, err := newMetricsExporter(newConfig, logger) if err != nil { logger.Errorf("Failed to update a new metrics exporter based on metric config %v. 
error: %v", newConfig, err) @@ -252,3 +259,35 @@ func isNewExporterRequired(newConfig *metricsConfig) bool { return false } + +// ConfigMapName gets the name of the metrics ConfigMap +func ConfigMapName() string { + cm := os.Getenv(ConfigMapNameEnv) + if cm == "" { + return "config-observability" + } + return cm +} + +// Domain holds the metrics domain to use for surfacing metrics. +func Domain() string { + if domain := os.Getenv(DomainEnv); domain != "" { + return domain + } + + panic(fmt.Sprintf(`The environment variable %q is not set + +If this is a process running on Kubernetes, then it should be specifying +this via: + + env: + - name: %s + value: knative.dev/some-repository + +If this is a Go unit test consuming metric.Domain() then it should add the +following import: + +import ( + _ "knative.dev/pkg/metrics/testing" +)`, DomainEnv, DomainEnv)) +} diff --git a/vendor/github.com/knative/pkg/metrics/doc.go b/vendor/knative.dev/pkg/metrics/doc.go similarity index 100% rename from vendor/github.com/knative/pkg/metrics/doc.go rename to vendor/knative.dev/pkg/metrics/doc.go diff --git a/vendor/github.com/knative/pkg/metrics/exporter.go b/vendor/knative.dev/pkg/metrics/exporter.go similarity index 82% rename from vendor/github.com/knative/pkg/metrics/exporter.go rename to vendor/knative.dev/pkg/metrics/exporter.go index 238f400f0..e20637e3a 100644 --- a/vendor/github.com/knative/pkg/metrics/exporter.go +++ b/vendor/knative.dev/pkg/metrics/exporter.go @@ -27,6 +27,11 @@ var ( metricsMux sync.Mutex ) +type flushable interface { + // Flush waits for metrics to be uploaded. + Flush() +} + // newMetricsExporter gets a metrics exporter based on the config. func newMetricsExporter(config *metricsConfig, logger *zap.SugaredLogger) (view.Exporter, error) { // If there is a Prometheus Exporter server running, stop it. @@ -83,3 +88,19 @@ func setCurMetricsConfig(c *metricsConfig) { } curMetricsConfig = c } + +// FlushExporter waits for exported data to be uploaded. 
+// This should be called before the process shuts down or exporter is replaced. +// Return value indicates whether the exporter is flushable or not. +func FlushExporter() bool { + e := getCurMetricsExporter() + if e == nil { + return false + } + + if f, ok := e.(flushable); ok { + f.Flush() + return true + } + return false +} diff --git a/vendor/github.com/knative/pkg/metrics/gcp_metadata.go b/vendor/knative.dev/pkg/metrics/gcp_metadata.go similarity index 91% rename from vendor/github.com/knative/pkg/metrics/gcp_metadata.go rename to vendor/knative.dev/pkg/metrics/gcp_metadata.go index ed64fb733..5f33e3ea4 100644 --- a/vendor/github.com/knative/pkg/metrics/gcp_metadata.go +++ b/vendor/knative.dev/pkg/metrics/gcp_metadata.go @@ -15,7 +15,7 @@ package metrics import ( "cloud.google.com/go/compute/metadata" - "github.com/knative/pkg/metrics/metricskey" + "knative.dev/pkg/metrics/metricskey" ) func retrieveGCPMetadata() *gcpMetadata { @@ -28,7 +28,7 @@ func retrieveGCPMetadata() *gcpMetadata { if err == nil && project != "" { gm.project = project } - location, err := metadata.Zone() + location, err := metadata.InstanceAttributeValue("cluster-location") if err == nil && location != "" { gm.location = location } diff --git a/vendor/github.com/knative/pkg/metrics/metricskey/constants.go b/vendor/knative.dev/pkg/metrics/metricskey/constants.go similarity index 100% rename from vendor/github.com/knative/pkg/metrics/metricskey/constants.go rename to vendor/knative.dev/pkg/metrics/metricskey/constants.go diff --git a/vendor/github.com/knative/pkg/metrics/monitored_resources.go b/vendor/knative.dev/pkg/metrics/monitored_resources.go similarity index 97% rename from vendor/github.com/knative/pkg/metrics/monitored_resources.go rename to vendor/knative.dev/pkg/metrics/monitored_resources.go index 295fb778f..d8ab5d875 100644 --- a/vendor/github.com/knative/pkg/metrics/monitored_resources.go +++ b/vendor/knative.dev/pkg/metrics/monitored_resources.go @@ -14,7 +14,7 @@ limitations 
under the License. package metrics import ( - "github.com/knative/pkg/metrics/metricskey" + "knative.dev/pkg/metrics/metricskey" ) type gcpMetadata struct { diff --git a/vendor/github.com/knative/pkg/metrics/prometheus_exporter.go b/vendor/knative.dev/pkg/metrics/prometheus_exporter.go similarity index 97% rename from vendor/github.com/knative/pkg/metrics/prometheus_exporter.go rename to vendor/knative.dev/pkg/metrics/prometheus_exporter.go index c3c0d55d0..b83b23a0d 100644 --- a/vendor/github.com/knative/pkg/metrics/prometheus_exporter.go +++ b/vendor/knative.dev/pkg/metrics/prometheus_exporter.go @@ -18,7 +18,7 @@ import ( "net/http" "sync" - "go.opencensus.io/exporter/prometheus" + "contrib.go.opencensus.io/exporter/prometheus" "go.opencensus.io/stats/view" "go.uber.org/zap" ) diff --git a/vendor/github.com/knative/pkg/metrics/record.go b/vendor/knative.dev/pkg/metrics/record.go similarity index 80% rename from vendor/github.com/knative/pkg/metrics/record.go rename to vendor/knative.dev/pkg/metrics/record.go index 98a007cfe..1b045ea0a 100644 --- a/vendor/github.com/knative/pkg/metrics/record.go +++ b/vendor/knative.dev/pkg/metrics/record.go @@ -20,8 +20,8 @@ import ( "context" "path" - "github.com/knative/pkg/metrics/metricskey" "go.opencensus.io/stats" + "knative.dev/pkg/metrics/metricskey" ) // Record decides whether to record one measurement via OpenCensus based on the @@ -54,3 +54,14 @@ func Record(ctx context.Context, ms stats.Measurement) { stats.Record(ctx, ms) } } + +// Buckets125 generates an array of buckets with approximate powers-of-two +// buckets that also aligns with powers of 10 on every 3rd step. This can +// be used to create a view.Distribution. 
+func Buckets125(low, high float64) []float64 { + buckets := []float64{low} + for last := low; last < high; last = last * 10 { + buckets = append(buckets, 2*last, 5*last, 10*last) + } + return buckets +} diff --git a/vendor/github.com/knative/pkg/metrics/stackdriver_exporter.go b/vendor/knative.dev/pkg/metrics/stackdriver_exporter.go similarity index 99% rename from vendor/github.com/knative/pkg/metrics/stackdriver_exporter.go rename to vendor/knative.dev/pkg/metrics/stackdriver_exporter.go index 60bf1d5f3..6d2a8cab8 100644 --- a/vendor/github.com/knative/pkg/metrics/stackdriver_exporter.go +++ b/vendor/knative.dev/pkg/metrics/stackdriver_exporter.go @@ -18,10 +18,10 @@ import ( "contrib.go.opencensus.io/exporter/stackdriver" "contrib.go.opencensus.io/exporter/stackdriver/monitoredresource" - "github.com/knative/pkg/metrics/metricskey" "go.opencensus.io/stats/view" "go.opencensus.io/tag" "go.uber.org/zap" + "knative.dev/pkg/metrics/metricskey" ) // customMetricTypePrefix is the metric type prefix for unsupported metrics by diff --git a/vendor/knative.dev/pkg/reconciler/testing/actions.go b/vendor/knative.dev/pkg/reconciler/testing/actions.go new file mode 100644 index 000000000..7dc967b92 --- /dev/null +++ b/vendor/knative.dev/pkg/reconciler/testing/actions.go @@ -0,0 +1,76 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package testing + +import ( + "fmt" + + clientgotesting "k8s.io/client-go/testing" +) + +// Actions stores list of Actions recorded by the reactors. +type Actions struct { + Gets []clientgotesting.GetAction + Creates []clientgotesting.CreateAction + Updates []clientgotesting.UpdateAction + Deletes []clientgotesting.DeleteAction + DeleteCollections []clientgotesting.DeleteCollectionAction + Patches []clientgotesting.PatchAction +} + +// ActionRecorder contains list of K8s request actions. +type ActionRecorder interface { + Actions() []clientgotesting.Action +} + +// ActionRecorderList is a list of ActionRecorder objects. +type ActionRecorderList []ActionRecorder + +// ActionsByVerb fills in Actions objects, sorting the actions +// by verb. +func (l ActionRecorderList) ActionsByVerb() (Actions, error) { + var a Actions + + for _, recorder := range l { + for _, action := range recorder.Actions() { + switch action.GetVerb() { + case "get": + a.Gets = append(a.Gets, + action.(clientgotesting.GetAction)) + case "create": + a.Creates = append(a.Creates, + action.(clientgotesting.CreateAction)) + case "update": + a.Updates = append(a.Updates, + action.(clientgotesting.UpdateAction)) + case "delete": + a.Deletes = append(a.Deletes, + action.(clientgotesting.DeleteAction)) + case "delete-collection": + a.DeleteCollections = append(a.DeleteCollections, + action.(clientgotesting.DeleteCollectionAction)) + case "patch": + a.Patches = append(a.Patches, + action.(clientgotesting.PatchAction)) + case "list", "watch": // avoid 'unexpected verb list/watch' error + default: + return a, fmt.Errorf("unexpected verb %v: %+v", action.GetVerb(), action) + } + } + } + return a, nil +} diff --git a/vendor/knative.dev/pkg/reconciler/testing/clock.go b/vendor/knative.dev/pkg/reconciler/testing/clock.go new file mode 100644 index 000000000..44ba77cdb --- /dev/null +++ b/vendor/knative.dev/pkg/reconciler/testing/clock.go @@ -0,0 +1,29 @@ +/* +Copyright 2019 The Knative Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "time" +) + +type FakeClock struct { + Time time.Time +} + +func (c FakeClock) Now() time.Time { + return c.Time +} diff --git a/vendor/knative.dev/pkg/reconciler/testing/context.go b/vendor/knative.dev/pkg/reconciler/testing/context.go new file mode 100644 index 000000000..723c87efa --- /dev/null +++ b/vendor/knative.dev/pkg/reconciler/testing/context.go @@ -0,0 +1,35 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package testing + +import ( + "context" + "testing" + + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + + "knative.dev/pkg/controller" + "knative.dev/pkg/injection" + logtesting "knative.dev/pkg/logging/testing" +) + +func SetupFakeContext(t *testing.T) (context.Context, []controller.Informer) { + ctx := logtesting.TestContextWithLogger(t) + ctx = controller.WithEventRecorder(ctx, record.NewFakeRecorder(1000)) + return injection.Fake.SetupInformers(ctx, &rest.Config{}) +} diff --git a/vendor/knative.dev/pkg/reconciler/testing/events.go b/vendor/knative.dev/pkg/reconciler/testing/events.go new file mode 100644 index 000000000..498b04f7e --- /dev/null +++ b/vendor/knative.dev/pkg/reconciler/testing/events.go @@ -0,0 +1,44 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "fmt" + + "k8s.io/client-go/tools/record" +) + +// EventList exports all events during reconciliation through fake event recorder +// with event channel with buffer of given size. +type EventList struct { + Recorder *record.FakeRecorder +} + +// Events iterates over events received from channel in fake event recorder and returns all. +func (l EventList) Events() []string { + close(l.Recorder.Events) + events := []string{} + for e := range l.Recorder.Events { + events = append(events, e) + } + return events +} + +// Eventf formats as FakeRecorder does. 
+func Eventf(eventType, reason, messageFmt string, args ...interface{}) string { + return fmt.Sprintf(eventType+" "+reason+" "+messageFmt, args...) +} diff --git a/vendor/knative.dev/pkg/reconciler/testing/generate_name_reactor.go b/vendor/knative.dev/pkg/reconciler/testing/generate_name_reactor.go new file mode 100644 index 000000000..52ac44951 --- /dev/null +++ b/vendor/knative.dev/pkg/reconciler/testing/generate_name_reactor.go @@ -0,0 +1,86 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "fmt" + "sync/atomic" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + clientgotesting "k8s.io/client-go/testing" +) + +// GenerateNameReactor will simulate the k8s API server +// and generate a name for resources who's metadata.generateName +// property is set. 
This happens only for CreateAction types +// +// This generator is deterministic (unliked k8s) and uses a global +// counter to help make test names predictable +type GenerateNameReactor struct { + count int64 +} + +// Handles contains all the logic to generate the name and mutates +// the create action object +// +// This is a hack as 'React' is passed a DeepCopy of the action hence +// this is the only opportunity to 'mutate' the action in the +// ReactionChain and have to continue executing additional reactors +// +// We should push changes upstream to client-go to help us with +// mocking +func (r *GenerateNameReactor) Handles(action clientgotesting.Action) bool { + create, ok := action.(clientgotesting.CreateAction) + + if !ok { + return false + } + + objMeta, err := meta.Accessor(create.GetObject()) + + if err != nil { + return false + } + + if objMeta.GetName() != "" { + return false + } + + if objMeta.GetGenerateName() == "" { + return false + } + + val := atomic.AddInt64(&r.count, 1) + + objMeta.SetName(fmt.Sprintf("%s%05d", objMeta.GetGenerateName(), val)) + + return false +} + +// React is noop-function +func (r *GenerateNameReactor) React(action clientgotesting.Action) (handled bool, ret runtime.Object, err error) { + return false, nil, nil +} + +var _ clientgotesting.Reactor = (*GenerateNameReactor)(nil) + +// PrependGenerateNameReactor will instrument a client-go testing Fake +// with a reactor that simulates 'generateName' functionality +func PrependGenerateNameReactor(f *clientgotesting.Fake) { + f.ReactionChain = append([]clientgotesting.Reactor{&GenerateNameReactor{}}, f.ReactionChain...) 
+} diff --git a/vendor/knative.dev/pkg/reconciler/testing/hooks.go b/vendor/knative.dev/pkg/reconciler/testing/hooks.go new file mode 100644 index 000000000..cde3d7d21 --- /dev/null +++ b/vendor/knative.dev/pkg/reconciler/testing/hooks.go @@ -0,0 +1,183 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package testing includes utilities for testing controllers. +package testing + +import ( + "errors" + "sync" + "sync/atomic" + "time" + + "k8s.io/apimachinery/pkg/runtime" + kubetesting "k8s.io/client-go/testing" +) + +// HookResult is the return value of hook functions. +type HookResult bool + +const ( + // HookComplete indicates the hook function completed, and WaitForHooks should + // not wait for it. + HookComplete HookResult = true + // HookIncomplete indicates the hook function is incomplete, and WaitForHooks + // should wait for it to complete. + HookIncomplete HookResult = false +) + +/* +CreateHookFunc is a function for handling a Create hook. Its runtime.Object +parameter will be the Kubernetes resource created. The resource can be cast +to its actual type like this: + + pod := obj.(*v1.Pod) + +A return value of true marks the hook as completed. Returning false allows +the hook to run again when the next resource of the requested type is +created. +*/ +type CreateHookFunc func(runtime.Object) HookResult + +/* +UpdateHookFunc is a function for handling an update hook. 
its runtime.Object +parameter will be the Kubernetes resource updated. The resource can be cast +to its actual type like this: + + pod := obj.(*v1.Pod) + +A return value of true marks the hook as completed. Returning false allows +the hook to run again when the next resource of the requested type is +updated. +*/ +type UpdateHookFunc func(runtime.Object) HookResult + +/* +DeleteHookFunc is a function for handling a delete hook. Its name parameter will +be the name of the resource deleted. The resource itself is not available to +the reactor. +*/ +type DeleteHookFunc func(string) HookResult + +/* +Hooks is a utility struct that simplifies controller testing with fake +clients. A Hooks struct allows attaching hook functions to actions (create, +update, delete) on a specified resource type within a fake client and ensuring +that all hooks complete in a timely manner. +*/ +type Hooks struct { + completionCh chan int32 + completionIndex int32 + + // Denotes whether or not the registered hooks should no longer be called + // because they have already been waited upon. + // This uses a Mutex over a channel to guarantee that after WaitForHooks + // returns no hooked functions will be called. + closed bool + mutex sync.RWMutex +} + +// NewHooks returns a Hooks struct that can be used to attach hooks to one or +// more fake clients and wait for all hooks to complete. +// TODO(grantr): Allow validating that a hook never fires +func NewHooks() *Hooks { + return &Hooks{ + completionCh: make(chan int32, 100), + completionIndex: -1, + } +} + +// OnCreate attaches a create hook to the given Fake. The hook function is +// executed every time a resource of the given type is created. 
+func (h *Hooks) OnCreate(fake *kubetesting.Fake, resource string, rf CreateHookFunc) { + index := atomic.AddInt32(&h.completionIndex, 1) + fake.PrependReactor("create", resource, func(a kubetesting.Action) (bool, runtime.Object, error) { + obj := a.(kubetesting.CreateActionImpl).Object + + h.mutex.RLock() + defer h.mutex.RUnlock() + if !h.closed && rf(obj) == HookComplete { + h.completionCh <- index + } + return false, nil, nil + }) +} + +// OnUpdate attaches an update hook to the given Fake. The hook function is +// executed every time a resource of the given type is updated. +func (h *Hooks) OnUpdate(fake *kubetesting.Fake, resource string, rf UpdateHookFunc) { + index := atomic.AddInt32(&h.completionIndex, 1) + fake.PrependReactor("update", resource, func(a kubetesting.Action) (bool, runtime.Object, error) { + obj := a.(kubetesting.UpdateActionImpl).Object + + h.mutex.RLock() + defer h.mutex.RUnlock() + if !h.closed && rf(obj) == HookComplete { + h.completionCh <- index + } + return false, nil, nil + }) +} + +// OnDelete attaches a delete hook to the given Fake. The hook function is +// executed every time a resource of the given type is deleted. +func (h *Hooks) OnDelete(fake *kubetesting.Fake, resource string, rf DeleteHookFunc) { + index := atomic.AddInt32(&h.completionIndex, 1) + fake.PrependReactor("delete", resource, func(a kubetesting.Action) (bool, runtime.Object, error) { + name := a.(kubetesting.DeleteActionImpl).Name + + h.mutex.RLock() + defer h.mutex.RUnlock() + if !h.closed && rf(name) == HookComplete { + h.completionCh <- index + } + return false, nil, nil + }) +} + +// WaitForHooks waits until all attached hooks have returned true at least once. +// If the given timeout expires before that happens, an error is returned. +// The registered actions will no longer be executed after WaitForHooks has +// returned. 
+func (h *Hooks) WaitForHooks(timeout time.Duration) error { + defer func() { + h.mutex.Lock() + defer h.mutex.Unlock() + h.closed = true + }() + + ci := int(atomic.LoadInt32(&h.completionIndex)) + if ci == -1 { + return nil + } + + // Convert index to count. + ci++ + timer := time.After(timeout) + hookCompletions := map[int32]HookResult{} + for { + select { + case i := <-h.completionCh: + hookCompletions[i] = HookComplete + if len(hookCompletions) == ci { + atomic.StoreInt32(&h.completionIndex, -1) + return nil + } + case <-timer: + return errors.New("timed out waiting for hooks to complete") + } + } +} diff --git a/vendor/knative.dev/pkg/reconciler/testing/reactions.go b/vendor/knative.dev/pkg/reconciler/testing/reactions.go new file mode 100644 index 000000000..09caae767 --- /dev/null +++ b/vendor/knative.dev/pkg/reconciler/testing/reactions.go @@ -0,0 +1,66 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + clientgotesting "k8s.io/client-go/testing" + + "knative.dev/pkg/apis" +) + +// InduceFailure is used in conjunction with TableTest's WithReactors field. +// Tests that want to induce a failure in a row of a TableTest would add: +// WithReactors: []clientgotesting.ReactionFunc{ +// // Makes calls to create revisions return an error. 
+// InduceFailure("create", "revisions"), +// }, +func InduceFailure(verb, resource string) clientgotesting.ReactionFunc { + return func(action clientgotesting.Action) (handled bool, ret runtime.Object, err error) { + if !action.Matches(verb, resource) { + return false, nil, nil + } + return true, nil, fmt.Errorf("inducing failure for %s %s", action.GetVerb(), action.GetResource().Resource) + } +} + +func ValidateCreates(ctx context.Context, action clientgotesting.Action) (handled bool, ret runtime.Object, err error) { + got := action.(clientgotesting.CreateAction).GetObject() + obj, ok := got.(apis.Validatable) + if !ok { + return false, nil, nil + } + if err := obj.Validate(ctx); err != nil { + return true, nil, err + } + return false, nil, nil +} + +func ValidateUpdates(ctx context.Context, action clientgotesting.Action) (handled bool, ret runtime.Object, err error) { + got := action.(clientgotesting.UpdateAction).GetObject() + obj, ok := got.(apis.Validatable) + if !ok { + return false, nil, nil + } + if err := obj.Validate(ctx); err != nil { + return true, nil, err + } + return false, nil, nil +} diff --git a/vendor/knative.dev/pkg/reconciler/testing/sorter.go b/vendor/knative.dev/pkg/reconciler/testing/sorter.go new file mode 100644 index 000000000..27061b69d --- /dev/null +++ b/vendor/knative.dev/pkg/reconciler/testing/sorter.go @@ -0,0 +1,93 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package testing + +import ( + "fmt" + "reflect" + + "k8s.io/apimachinery/pkg/runtime" + util_runtime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/tools/cache" +) + +func NewObjectSorter(scheme *runtime.Scheme) ObjectSorter { + cache := make(map[reflect.Type]cache.Indexer) + + for _, v := range scheme.AllKnownTypes() { + cache[v] = emptyIndexer() + } + + ls := ObjectSorter{ + cache: cache, + } + + return ls +} + +type ObjectSorter struct { + cache map[reflect.Type]cache.Indexer +} + +func (o *ObjectSorter) AddObjects(objs ...runtime.Object) { + for _, obj := range objs { + t := reflect.TypeOf(obj).Elem() + indexer, ok := o.cache[t] + if !ok { + panic(fmt.Sprintf("Unrecognized type %T", obj)) + } + indexer.Add(obj) + } +} + +func (o *ObjectSorter) ObjectsForScheme(scheme *runtime.Scheme) []runtime.Object { + var objs []runtime.Object + + for _, t := range scheme.AllKnownTypes() { + indexer := o.cache[t] + for _, item := range indexer.List() { + objs = append(objs, item.(runtime.Object)) + } + } + + return objs +} + +func (o *ObjectSorter) ObjectsForSchemeFunc(funcs ...func(scheme *runtime.Scheme) error) []runtime.Object { + scheme := runtime.NewScheme() + + for _, addToScheme := range funcs { + util_runtime.Must(addToScheme(scheme)) + } + + return o.ObjectsForScheme(scheme) +} + +func (o *ObjectSorter) IndexerForObjectType(obj runtime.Object) cache.Indexer { + objType := reflect.TypeOf(obj).Elem() + + indexer, ok := o.cache[objType] + + if !ok { + panic(fmt.Sprintf("indexer for type %v doesn't exist", objType.Name())) + } + + return indexer +} + +func emptyIndexer() cache.Indexer { + return cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) +} diff --git a/vendor/knative.dev/pkg/reconciler/testing/stats.go b/vendor/knative.dev/pkg/reconciler/testing/stats.go new file mode 100644 index 000000000..1d389a15d --- /dev/null +++ b/vendor/knative.dev/pkg/reconciler/testing/stats.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Knative Authors + 
+Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "fmt" + "time" +) + +// FakeStatsReporter is a fake implementation of StatsReporter +type FakeStatsReporter struct { + servicesReady map[string]int +} + +func (r *FakeStatsReporter) ReportServiceReady(namespace, service string, d time.Duration) error { + key := fmt.Sprintf("%s/%s", namespace, service) + if r.servicesReady == nil { + r.servicesReady = make(map[string]int) + } + r.servicesReady[key]++ + return nil +} + +func (r *FakeStatsReporter) GetServiceReadyStats() map[string]int { + return r.servicesReady +} diff --git a/vendor/knative.dev/pkg/reconciler/testing/table.go b/vendor/knative.dev/pkg/reconciler/testing/table.go new file mode 100644 index 000000000..2e8ce0759 --- /dev/null +++ b/vendor/knative.dev/pkg/reconciler/testing/table.go @@ -0,0 +1,365 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package testing + +import ( + "context" + "path" + "reflect" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/runtime" + clientgotesting "k8s.io/client-go/testing" + "k8s.io/client-go/tools/cache" + "knative.dev/pkg/controller" + "knative.dev/pkg/kmeta" + _ "knative.dev/pkg/system/testing" // Setup system.Namespace() +) + +// TableRow holds a single row of our table test. +type TableRow struct { + // Name is a descriptive name for this test suitable as a first argument to t.Run() + Name string + + // Ctx is the context to pass to Reconcile. Defaults to context.Background() + Ctx context.Context + + // Objects holds the state of the world at the onset of reconciliation. + Objects []runtime.Object + + // Key is the parameter to reconciliation. + // This has the form "namespace/name". + Key string + + // WantErr holds whether we should expect the reconciliation to result in an error. + WantErr bool + + // WantCreates holds the ordered list of Create calls we expect during reconciliation. + WantCreates []runtime.Object + + // WantUpdates holds the ordered list of Update calls we expect during reconciliation. + WantUpdates []clientgotesting.UpdateActionImpl + + // WantStatusUpdates holds the ordered list of Update calls, with `status` subresource set, + // that we expect during reconciliation. + WantStatusUpdates []clientgotesting.UpdateActionImpl + + // WantDeletes holds the ordered list of Delete calls we expect during reconciliation. + WantDeletes []clientgotesting.DeleteActionImpl + + // WantDeleteCollections holds the ordered list of DeleteCollection calls we expect during reconciliation. + WantDeleteCollections []clientgotesting.DeleteCollectionActionImpl + + // WantPatches holds the ordered list of Patch calls we expect during reconciliation. 
+ WantPatches []clientgotesting.PatchActionImpl + + // WantEvents holds the ordered list of events we expect during reconciliation. + WantEvents []string + + // WantServiceReadyStats holds the ServiceReady stats we exepect during reconciliation. + WantServiceReadyStats map[string]int + + // WithReactors is a set of functions that are installed as Reactors for the execution + // of this row of the table-driven-test. + WithReactors []clientgotesting.ReactionFunc + + // For cluster-scoped resources like ClusterIngress, it does not have to be + // in the same namespace with its child resources. + SkipNamespaceValidation bool +} + +func objKey(o runtime.Object) string { + on := o.(kmeta.Accessor) + // namespace + name is not unique, and the tests don't populate k8s kind + // information, so use GoLang's type name as part of the key. + return path.Join(reflect.TypeOf(o).String(), on.GetNamespace(), on.GetName()) +} + +// Factory returns a Reconciler.Interface to perform reconciliation in table test, +// ActionRecorderList/EventList to capture k8s actions/events produced during reconciliation +// and FakeStatsReporter to capture stats. +type Factory func(*testing.T, *TableRow) (controller.Reconciler, ActionRecorderList, EventList, *FakeStatsReporter) + +// Test executes the single table test. +func (r *TableRow) Test(t *testing.T, factory Factory) { + t.Helper() + c, recorderList, eventList, statsReporter := factory(t, r) + + // Set context to not be nil. + ctx := r.Ctx + if ctx == nil { + ctx = context.Background() + } + + // Run the Reconcile we're testing. 
+ if err := c.Reconcile(ctx, r.Key); (err != nil) != r.WantErr { + t.Errorf("Reconcile() error = %v, WantErr %v", err, r.WantErr) + } + + expectedNamespace, _, _ := cache.SplitMetaNamespaceKey(r.Key) + + actions, err := recorderList.ActionsByVerb() + if err != nil { + t.Errorf("Error capturing actions by verb: %q", err) + } + + // Previous state is used to diff resource expected state for update requests that were missed. + objPrevState := map[string]runtime.Object{} + for _, o := range r.Objects { + objPrevState[objKey(o)] = o + } + + for i, want := range r.WantCreates { + if i >= len(actions.Creates) { + t.Errorf("Missing create: %#v", want) + continue + } + got := actions.Creates[i] + obj := got.GetObject() + objPrevState[objKey(obj)] = obj + + if !r.SkipNamespaceValidation && got.GetNamespace() != expectedNamespace { + t.Errorf("Unexpected action[%d]: %#v", i, got) + } + + if diff := cmp.Diff(want, obj, ignoreLastTransitionTime, safeDeployDiff, cmpopts.EquateEmpty()); diff != "" { + t.Errorf("Unexpected create (-want, +got): %s", diff) + } + } + if got, want := len(actions.Creates), len(r.WantCreates); got > want { + for _, extra := range actions.Creates[want:] { + t.Errorf("Extra create: %#v", extra.GetObject()) + } + } + + updates := filterUpdatesWithSubresource("", actions.Updates) + for i, want := range r.WantUpdates { + if i >= len(updates) { + wo := want.GetObject() + key := objKey(wo) + oldObj, ok := objPrevState[key] + if !ok { + t.Errorf("Object %s was never created: want: %#v", key, wo) + continue + } + t.Errorf("Missing update for %s (-want, +prevState): %s", key, + cmp.Diff(wo, oldObj, ignoreLastTransitionTime, safeDeployDiff, cmpopts.EquateEmpty())) + continue + } + + if want.GetSubresource() != "" { + t.Errorf("Expectation was invalid - it should not include a subresource: %#v", want) + } + + got := updates[i].GetObject() + + // Update the object state. 
+ objPrevState[objKey(got)] = got + + if diff := cmp.Diff(want.GetObject(), got, ignoreLastTransitionTime, safeDeployDiff, cmpopts.EquateEmpty()); diff != "" { + t.Errorf("Unexpected update (-want, +got): %s", diff) + } + } + if got, want := len(updates), len(r.WantUpdates); got > want { + for _, extra := range updates[want:] { + t.Errorf("Extra update: %#v", extra.GetObject()) + } + } + + // TODO(#2843): refactor. + statusUpdates := filterUpdatesWithSubresource("status", actions.Updates) + for i, want := range r.WantStatusUpdates { + if i >= len(statusUpdates) { + wo := want.GetObject() + key := objKey(wo) + oldObj, ok := objPrevState[key] + if !ok { + t.Errorf("Object %s was never created: want: %#v", key, wo) + continue + } + t.Errorf("Missing status update for %s (-want, +prevState): %s", key, + cmp.Diff(wo, oldObj, ignoreLastTransitionTime, safeDeployDiff, cmpopts.EquateEmpty())) + continue + } + + got := statusUpdates[i].GetObject() + + // Update the object state. + objPrevState[objKey(got)] = got + + if diff := cmp.Diff(want.GetObject(), got, ignoreLastTransitionTime, safeDeployDiff, cmpopts.EquateEmpty()); diff != "" { + t.Errorf("Unexpected status update (-want, +got): %s\nFull: %v", diff, got) + } + } + if got, want := len(statusUpdates), len(r.WantStatusUpdates); got > want { + for _, extra := range statusUpdates[want:] { + wo := extra.GetObject() + key := objKey(wo) + oldObj, ok := objPrevState[key] + if !ok { + t.Errorf("Object %s was never created: want: %#v", key, wo) + continue + } + t.Errorf("Extra status update for %s (-extra, +prevState): %s", key, + cmp.Diff(wo, oldObj, ignoreLastTransitionTime, safeDeployDiff, cmpopts.EquateEmpty())) + } + } + + if len(statusUpdates)+len(updates) != len(actions.Updates) { + var unexpected []runtime.Object + + for _, update := range actions.Updates { + if update.GetSubresource() != "status" && update.GetSubresource() != "" { + unexpected = append(unexpected, update.GetObject()) + } + } + + t.Errorf("Unexpected 
subresource updates occurred %#v", unexpected) + } + + for i, want := range r.WantDeletes { + if i >= len(actions.Deletes) { + t.Errorf("Missing delete: %#v", want) + continue + } + got := actions.Deletes[i] + if got.GetName() != want.GetName() { + t.Errorf("Unexpected delete[%d]: %#v", i, got) + } + if !r.SkipNamespaceValidation && got.GetNamespace() != expectedNamespace { + t.Errorf("Unexpected delete[%d]: %#v", i, got) + } + } + if got, want := len(actions.Deletes), len(r.WantDeletes); got > want { + for _, extra := range actions.Deletes[want:] { + t.Errorf("Extra delete: %s/%s", extra.GetNamespace(), extra.GetName()) + } + } + + for i, want := range r.WantDeleteCollections { + if i >= len(actions.DeleteCollections) { + t.Errorf("Missing delete-collection: %#v", want) + continue + } + got := actions.DeleteCollections[i] + if got, want := got.GetListRestrictions().Labels, want.GetListRestrictions().Labels; (got != nil) != (want != nil) || got.String() != want.String() { + t.Errorf("Unexpected delete-collection[%d].Labels = %v, wanted %v", i, got, want) + } + if got, want := got.GetListRestrictions().Fields, want.GetListRestrictions().Fields; (got != nil) != (want != nil) || got.String() != want.String() { + t.Errorf("Unexpected delete-collection[%d].Fields = %v, wanted %v", i, got, want) + } + if !r.SkipNamespaceValidation && got.GetNamespace() != expectedNamespace { + t.Errorf("Unexpected delete-collection[%d]: %#v, wanted %s", i, got, expectedNamespace) + } + } + if got, want := len(actions.DeleteCollections), len(r.WantDeleteCollections); got > want { + for _, extra := range actions.DeleteCollections[want:] { + t.Errorf("Extra delete-collection: %#v", extra) + } + } + + for i, want := range r.WantPatches { + if i >= len(actions.Patches) { + t.Errorf("Missing patch: %#v; raw: %s", want, string(want.GetPatch())) + continue + } + + got := actions.Patches[i] + if got.GetName() != want.GetName() { + t.Errorf("Unexpected patch[%d]: %#v", i, got) + } + if 
!r.SkipNamespaceValidation && got.GetNamespace() != expectedNamespace { + t.Errorf("Unexpected patch[%d]: %#v", i, got) + } + if diff := cmp.Diff(string(want.GetPatch()), string(got.GetPatch())); diff != "" { + t.Errorf("Unexpected patch(-want, +got): %s", diff) + } + } + if got, want := len(actions.Patches), len(r.WantPatches); got > want { + for _, extra := range actions.Patches[want:] { + t.Errorf("Extra patch: %#v; raw: %s", extra, string(extra.GetPatch())) + } + } + + gotEvents := eventList.Events() + for i, want := range r.WantEvents { + if i >= len(gotEvents) { + t.Errorf("Missing event: %s", want) + continue + } + + if diff := cmp.Diff(want, gotEvents[i]); diff != "" { + t.Errorf("unexpected event(-want, +got): %s", diff) + } + } + if got, want := len(gotEvents), len(r.WantEvents); got > want { + for _, extra := range gotEvents[want:] { + t.Errorf("Extra event: %s", extra) + } + } + + gotStats := statsReporter.GetServiceReadyStats() + if diff := cmp.Diff(r.WantServiceReadyStats, gotStats); diff != "" { + t.Errorf("Unexpected service ready stats (-want, +got): %s", diff) + } +} + +func filterUpdatesWithSubresource( + subresource string, + actions []clientgotesting.UpdateAction) (result []clientgotesting.UpdateAction) { + for _, action := range actions { + if action.GetSubresource() == subresource { + result = append(result, action) + } + } + return +} + +// TableTest represents a list of TableRow tests instances. +type TableTest []TableRow + +// Test executes the whole suite of the table tests. +func (tt TableTest) Test(t *testing.T, factory Factory) { + t.Helper() + for _, test := range tt { + // Record the original objects in table. 
+ originObjects := []runtime.Object{} + for _, obj := range test.Objects { + originObjects = append(originObjects, obj.DeepCopyObject()) + } + t.Run(test.Name, func(t *testing.T) { + t.Helper() + test.Test(t, factory) + }) + // Validate cached objects do not get soiled after controller loops + if diff := cmp.Diff(originObjects, test.Objects, safeDeployDiff, cmpopts.EquateEmpty()); diff != "" { + t.Errorf("Unexpected objects in test %s (-want, +got): %v", test.Name, diff) + } + } +} + +var ( + ignoreLastTransitionTime = cmp.FilterPath(func(p cmp.Path) bool { + return strings.HasSuffix(p.String(), "LastTransitionTime.Inner.Time") + }, cmp.Ignore()) + + safeDeployDiff = cmpopts.IgnoreUnexported(resource.Quantity{}) +) diff --git a/vendor/knative.dev/pkg/reconciler/testing/tracker.go b/vendor/knative.dev/pkg/reconciler/testing/tracker.go new file mode 100644 index 000000000..480598938 --- /dev/null +++ b/vendor/knative.dev/pkg/reconciler/testing/tracker.go @@ -0,0 +1,34 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + corev1 "k8s.io/api/core/v1" + + "knative.dev/pkg/tracker" +) + +// NullTracker implements Tracker. +type NullTracker struct{} + +var _ tracker.Interface = (*NullTracker)(nil) + +// OnChanged implements OnChanged. +func (*NullTracker) OnChanged(interface{}) {} + +// Track implements Track. 
+func (*NullTracker) Track(corev1.ObjectReference, interface{}) error { return nil } diff --git a/vendor/knative.dev/pkg/reconciler/testing/util.go b/vendor/knative.dev/pkg/reconciler/testing/util.go new file mode 100644 index 000000000..14689693f --- /dev/null +++ b/vendor/knative.dev/pkg/reconciler/testing/util.go @@ -0,0 +1,85 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package testing includes utilities for testing controllers. +package testing + +import ( + "regexp" + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/cache" +) + +// KeyOrDie returns the string key of the Kubernetes object or panics if a key +// cannot be generated. +func KeyOrDie(obj interface{}) string { + key, err := cache.MetaNamespaceKeyFunc(obj) + if err != nil { + panic(err) + } + return key +} + +// ExpectNormalEventDelivery returns a hook function that can be passed to a +// Hooks.OnCreate() call to verify that an event of type Normal was created +// matching the given regular expression. For this expectation to be effective +// the test must also call Hooks.WaitForHooks(). 
+func ExpectNormalEventDelivery(t *testing.T, messageRegexp string) CreateHookFunc { + t.Helper() + wantRegexp, err := regexp.Compile(messageRegexp) + if err != nil { + t.Fatalf("Invalid regular expression: %v", err) + } + return func(obj runtime.Object) HookResult { + t.Helper() + event := obj.(*corev1.Event) + if !wantRegexp.MatchString(event.Message) { + return HookIncomplete + } + t.Logf("Got an event message matching %q: %q", wantRegexp, event.Message) + if got, want := event.Type, corev1.EventTypeNormal; got != want { + t.Errorf("unexpected event Type: %q expected: %q", got, want) + } + return HookComplete + } +} + +// ExpectWarningEventDelivery returns a hook function that can be passed to a +// Hooks.OnCreate() call to verify that an event of type Warning was created +// matching the given regular expression. For this expectation to be effective +// the test must also call Hooks.WaitForHooks(). +func ExpectWarningEventDelivery(t *testing.T, messageRegexp string) CreateHookFunc { + t.Helper() + wantRegexp, err := regexp.Compile(messageRegexp) + if err != nil { + t.Fatalf("Invalid regular expression: %v", err) + } + return func(obj runtime.Object) HookResult { + t.Helper() + event := obj.(*corev1.Event) + if !wantRegexp.MatchString(event.Message) { + return HookIncomplete + } + t.Logf("Got an event message matching %q: %q", wantRegexp, event.Message) + if got, want := event.Type, corev1.EventTypeWarning; got != want { + t.Errorf("unexpected event Type: %q expected: %q", got, want) + } + return HookComplete + } +} diff --git a/vendor/knative.dev/pkg/system/clock.go b/vendor/knative.dev/pkg/system/clock.go new file mode 100644 index 000000000..7d99d9b5c --- /dev/null +++ b/vendor/knative.dev/pkg/system/clock.go @@ -0,0 +1,32 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package system + +import ( + "time" +) + +// Mockable interface for time based testing +type Clock interface { + Now() time.Time +} + +type RealClock struct{} + +func (RealClock) Now() time.Time { + return time.Now() +} diff --git a/vendor/knative.dev/pkg/system/names.go b/vendor/knative.dev/pkg/system/names.go new file mode 100644 index 000000000..13a07c8ed --- /dev/null +++ b/vendor/knative.dev/pkg/system/names.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package system + +import ( + "fmt" + "os" +) + +const ( + NamespaceEnvKey = "SYSTEM_NAMESPACE" +) + +// Namespace holds the K8s namespace where our serving system +// components run. 
+func Namespace() string { + if ns := os.Getenv(NamespaceEnvKey); ns != "" { + return ns + } + + panic(fmt.Sprintf(`The environment variable %q is not set + +If this is a process running on Kubernetes, then it should be using the downward +API to initialize this variable via: + + env: + - name: %s + valueFrom: + fieldRef: + fieldPath: metadata.namespace + +If this is a Go unit test consuming system.Namespace() then it should add the +following import: + +import ( + _ "knative.dev/pkg/system/testing" +)`, NamespaceEnvKey, NamespaceEnvKey)) +} diff --git a/vendor/knative.dev/pkg/system/testing/names.go b/vendor/knative.dev/pkg/system/testing/names.go new file mode 100644 index 000000000..6f674f16b --- /dev/null +++ b/vendor/knative.dev/pkg/system/testing/names.go @@ -0,0 +1,27 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "os" + + "knative.dev/pkg/system" +) + +func init() { + os.Setenv(system.NamespaceEnvKey, "knative-testing") +} diff --git a/vendor/knative.dev/pkg/test/OWNERS b/vendor/knative.dev/pkg/test/OWNERS new file mode 100644 index 000000000..c50adc849 --- /dev/null +++ b/vendor/knative.dev/pkg/test/OWNERS @@ -0,0 +1,10 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. 
+ +approvers: +- productivity-approvers + +reviewers: +- productivity-reviewers + +labels: +- area/test-and-release diff --git a/vendor/github.com/knative/pkg/test/README.md b/vendor/knative.dev/pkg/test/README.md similarity index 96% rename from vendor/github.com/knative/pkg/test/README.md rename to vendor/knative.dev/pkg/test/README.md index f63661dad..60e5d484e 100644 --- a/vendor/github.com/knative/pkg/test/README.md +++ b/vendor/knative.dev/pkg/test/README.md @@ -31,8 +31,8 @@ These flags are useful for running against an existing cluster, making use of your existing [environment setup](https://github.com/knative/serving/blob/master/DEVELOPMENT.md#environment-setup). -By importing `github.com/knative/pkg/test` you get access to a global variable -called `test.Flags` which holds the values of +By importing `knative.dev/pkg/test` you get access to a global variable called +`test.Flags` which holds the values of [the command line flags](/test/README.md#flags). ```go @@ -93,7 +93,7 @@ When a `trace` metric is emitted, the format is arbitrary and can be any string. The values are: - `metric` - Indicates this log is a metric -- `` - Arbitrary string indentifying the metric +- `` - Arbitrary string identifying the metric - `` - Unix time in nanoseconds when measurement started - `` - Unix time in nanoseconds when measurement ended - `` - The difference in ms between the startTime and endTime @@ -170,8 +170,7 @@ _See [cleanup.go](./cleanup.go)._ Importing [the test library](#test-library) adds flags that are useful for end to end tests that need to run against a cluster. 
-Tests importing [`github.com/knative/pkg/test`](#test-library) recognize these -flags: +Tests importing [`knative.dev/pkg/test`](#test-library) recognize these flags: - [`--kubeconfig`](#specifying-kubeconfig) - [`--cluster`](#specifying-cluster) diff --git a/vendor/github.com/knative/pkg/test/cleanup.go b/vendor/knative.dev/pkg/test/cleanup.go similarity index 96% rename from vendor/github.com/knative/pkg/test/cleanup.go rename to vendor/knative.dev/pkg/test/cleanup.go index aa2c860fd..b34917262 100644 --- a/vendor/github.com/knative/pkg/test/cleanup.go +++ b/vendor/knative.dev/pkg/test/cleanup.go @@ -23,7 +23,7 @@ import ( "os" "os/signal" - "github.com/knative/pkg/test/logging" + "knative.dev/pkg/test/logging" ) // CleanupOnInterrupt will execute the function cleanup if an interrupt signal is caught diff --git a/vendor/github.com/knative/pkg/test/clients.go b/vendor/knative.dev/pkg/test/clients.go similarity index 98% rename from vendor/github.com/knative/pkg/test/clients.go rename to vendor/knative.dev/pkg/test/clients.go index fbd9e6583..4162a386c 100644 --- a/vendor/github.com/knative/pkg/test/clients.go +++ b/vendor/knative.dev/pkg/test/clients.go @@ -22,14 +22,14 @@ import ( "fmt" "strings" - "github.com/knative/pkg/test/logging" - "github.com/knative/pkg/test/spoof" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" k8styped "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" + "knative.dev/pkg/test/logging" + "knative.dev/pkg/test/spoof" ) // KubeClient holds instances of interfaces for making requests to kubernetes client. 
diff --git a/vendor/github.com/knative/pkg/test/crd.go b/vendor/knative.dev/pkg/test/crd.go similarity index 100% rename from vendor/github.com/knative/pkg/test/crd.go rename to vendor/knative.dev/pkg/test/crd.go diff --git a/vendor/github.com/knative/pkg/test/e2e_flags.go b/vendor/knative.dev/pkg/test/e2e_flags.go similarity index 100% rename from vendor/github.com/knative/pkg/test/e2e_flags.go rename to vendor/knative.dev/pkg/test/e2e_flags.go diff --git a/vendor/github.com/knative/pkg/test/ingress/ingress.go b/vendor/knative.dev/pkg/test/ingress/ingress.go similarity index 100% rename from vendor/github.com/knative/pkg/test/ingress/ingress.go rename to vendor/knative.dev/pkg/test/ingress/ingress.go diff --git a/vendor/github.com/knative/pkg/test/kube_checks.go b/vendor/knative.dev/pkg/test/kube_checks.go similarity index 85% rename from vendor/github.com/knative/pkg/test/kube_checks.go rename to vendor/knative.dev/pkg/test/kube_checks.go index 3df97e2d7..b15f8ea2b 100644 --- a/vendor/github.com/knative/pkg/test/kube_checks.go +++ b/vendor/knative.dev/pkg/test/kube_checks.go @@ -25,12 +25,12 @@ import ( "strings" "time" - "github.com/knative/pkg/test/logging" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" k8styped "k8s.io/client-go/kubernetes/typed/core/v1" + "knative.dev/pkg/test/logging" ) const ( @@ -104,12 +104,29 @@ func WaitForAllPodsRunning(client *KubeClient, namespace string) error { return WaitForPodListState(client, PodsRunning, "PodsAreRunning", namespace) } +// WaitForPodRunning waits for the given pod to be in running state +func WaitForPodRunning(client *KubeClient, name string, namespace string) error { + p := client.Kube.CoreV1().Pods(namespace) + return wait.PollImmediate(interval, podTimeout, func() (bool, error) { + p, err := p.Get(name, metav1.GetOptions{}) + if err != nil { + return true, err + } + return PodRunning(p), nil + }) +} + // PodsRunning 
will check the status conditions of the pod list and return true all pods are Running func PodsRunning(podList *corev1.PodList) (bool, error) { for _, pod := range podList.Items { - if pod.Status.Phase != corev1.PodRunning && pod.Status.Phase != corev1.PodSucceeded { + if isRunning := PodRunning(&pod); !isRunning { return false, nil } } return true, nil } + +// PodRunning will check the status conditions of the pod and return true if it's Running +func PodRunning(pod *corev1.Pod) bool { + return pod.Status.Phase == corev1.PodRunning || pod.Status.Phase == corev1.PodSucceeded +} diff --git a/vendor/github.com/knative/pkg/test/logging/logging.go b/vendor/knative.dev/pkg/test/logging/logging.go similarity index 99% rename from vendor/github.com/knative/pkg/test/logging/logging.go rename to vendor/knative.dev/pkg/test/logging/logging.go index 2c641bd7a..4d9c5a96f 100644 --- a/vendor/github.com/knative/pkg/test/logging/logging.go +++ b/vendor/knative.dev/pkg/test/logging/logging.go @@ -28,10 +28,10 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/golang/glog" - "github.com/knative/pkg/logging" "go.opencensus.io/stats/view" "go.opencensus.io/trace" "go.uber.org/zap" + "knative.dev/pkg/logging" ) const ( diff --git a/vendor/github.com/knative/pkg/test/monitoring/doc.go b/vendor/knative.dev/pkg/test/monitoring/doc.go similarity index 100% rename from vendor/github.com/knative/pkg/test/monitoring/doc.go rename to vendor/knative.dev/pkg/test/monitoring/doc.go diff --git a/vendor/github.com/knative/pkg/test/monitoring/monitoring.go b/vendor/knative.dev/pkg/test/monitoring/monitoring.go similarity index 98% rename from vendor/github.com/knative/pkg/test/monitoring/monitoring.go rename to vendor/knative.dev/pkg/test/monitoring/monitoring.go index ccbad42b1..708b0cae1 100644 --- a/vendor/github.com/knative/pkg/test/monitoring/monitoring.go +++ b/vendor/knative.dev/pkg/test/monitoring/monitoring.go @@ -23,10 +23,10 @@ import ( "os/exec" "strings" - 
"github.com/knative/pkg/test/logging" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" + "knative.dev/pkg/test/logging" ) // CheckPortAvailability checks to see if the port is available on the machine. diff --git a/vendor/github.com/knative/pkg/test/presubmit-tests.sh b/vendor/knative.dev/pkg/test/presubmit-tests.sh similarity index 86% rename from vendor/github.com/knative/pkg/test/presubmit-tests.sh rename to vendor/knative.dev/pkg/test/presubmit-tests.sh index 500f34110..c3861911f 100644 --- a/vendor/github.com/knative/pkg/test/presubmit-tests.sh +++ b/vendor/knative.dev/pkg/test/presubmit-tests.sh @@ -18,6 +18,10 @@ # It is started by prow for each PR. # For convenience, it can also be executed manually. +# Markdown linting failures don't show up properly in Gubernator resulting +# in a net-negative contributor experience. +export DISABLE_MD_LINTING=1 + source $(dirname $0)/../vendor/github.com/knative/test-infra/scripts/presubmit-tests.sh # TODO(#17): Write integration tests. diff --git a/vendor/github.com/knative/pkg/test/request.go b/vendor/knative.dev/pkg/test/request.go similarity index 87% rename from vendor/github.com/knative/pkg/test/request.go rename to vendor/knative.dev/pkg/test/request.go index 1735b6073..462c857e2 100644 --- a/vendor/github.com/knative/pkg/test/request.go +++ b/vendor/knative.dev/pkg/test/request.go @@ -26,10 +26,29 @@ import ( "strings" "time" - "github.com/knative/pkg/test/logging" - "github.com/knative/pkg/test/spoof" + "knative.dev/pkg/test/logging" + "knative.dev/pkg/test/spoof" ) +// RequestOption enables configuration of requests +// when polling for endpoint states. +type RequestOption func(*http.Request) + +// WithHeader will add the provided headers to the request. 
+func WithHeader(header http.Header) RequestOption { + return func(r *http.Request) { + if r.Header == nil { + r.Header = header + return + } + for key, values := range header { + for _, value := range values { + r.Header.Add(key, value) + } + } + } +} + // Retrying modifies a ResponseChecker to retry certain response codes. func Retrying(rc spoof.ResponseChecker, codes ...int) spoof.ResponseChecker { return func(resp *spoof.Response) (bool, error) { @@ -115,8 +134,8 @@ func MatchesAllOf(checkers ...spoof.ResponseChecker) spoof.ResponseChecker { // the domain in the request headers, otherwise it will make the request directly to domain. // desc will be used to name the metric that is emitted to track how long it took for the // domain to get into the state checked by inState. Commas in `desc` must be escaped. -func WaitForEndpointState(kubeClient *KubeClient, logf logging.FormatLogger, theURL string, inState spoof.ResponseChecker, desc string, resolvable bool) (*spoof.Response, error) { - return WaitForEndpointStateWithTimeout(kubeClient, logf, theURL, inState, desc, resolvable, spoof.RequestTimeout) +func WaitForEndpointState(kubeClient *KubeClient, logf logging.FormatLogger, theURL string, inState spoof.ResponseChecker, desc string, resolvable bool, opts ...RequestOption) (*spoof.Response, error) { + return WaitForEndpointStateWithTimeout(kubeClient, logf, theURL, inState, desc, resolvable, spoof.RequestTimeout, opts...) } // WaitForEndpointStateWithTimeout will poll an endpoint until inState indicates the state is achieved @@ -127,7 +146,7 @@ func WaitForEndpointState(kubeClient *KubeClient, logf logging.FormatLogger, the // domain to get into the state checked by inState. Commas in `desc` must be escaped. 
func WaitForEndpointStateWithTimeout( kubeClient *KubeClient, logf logging.FormatLogger, theURL string, inState spoof.ResponseChecker, - desc string, resolvable bool, timeout time.Duration) (*spoof.Response, error) { + desc string, resolvable bool, timeout time.Duration, opts ...RequestOption) (*spoof.Response, error) { defer logging.GetEmitableSpan(context.Background(), fmt.Sprintf("WaitForEndpointState/%s", desc)).End() // Try parsing the "theURL" with and without a scheme. @@ -144,6 +163,10 @@ func WaitForEndpointStateWithTimeout( return nil, err } + for _, opt := range opts { + opt(req) + } + client, err := NewSpoofingClient(kubeClient, logf, asURL.Hostname(), resolvable) if err != nil { return nil, err diff --git a/vendor/github.com/knative/pkg/test/spoof/error_checks.go b/vendor/knative.dev/pkg/test/spoof/error_checks.go similarity index 100% rename from vendor/github.com/knative/pkg/test/spoof/error_checks.go rename to vendor/knative.dev/pkg/test/spoof/error_checks.go diff --git a/vendor/github.com/knative/pkg/test/spoof/spoof.go b/vendor/knative.dev/pkg/test/spoof/spoof.go similarity index 98% rename from vendor/github.com/knative/pkg/test/spoof/spoof.go rename to vendor/knative.dev/pkg/test/spoof/spoof.go index f25bb07d6..5e58905bb 100644 --- a/vendor/github.com/knative/pkg/test/spoof/spoof.go +++ b/vendor/knative.dev/pkg/test/spoof/spoof.go @@ -24,12 +24,12 @@ import ( "net/http" "time" - ingress "github.com/knative/pkg/test/ingress" - "github.com/knative/pkg/test/logging" - "github.com/knative/pkg/test/zipkin" "github.com/pkg/errors" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" + ingress "knative.dev/pkg/test/ingress" + "knative.dev/pkg/test/logging" + "knative.dev/pkg/test/zipkin" "go.opencensus.io/plugin/ochttp" "go.opencensus.io/plugin/ochttp/propagation/b3" @@ -106,7 +106,7 @@ func New(kubeClientset *kubernetes.Clientset, logf logging.FormatLogger, domain if endpointOverride == "" { var err error // If the domain that the Route 
controller is configured to assign to Route.Status.Domain - // (the domainSuffix) is not resolvable, we need to retrieve the the endpoint and spoof + // (the domainSuffix) is not resolvable, we need to retrieve the endpoint and spoof // the Host in our requests. e, err = ingress.GetIngressEndpoint(kubeClientset) if err != nil { diff --git a/vendor/github.com/knative/pkg/test/zipkin/doc.go b/vendor/knative.dev/pkg/test/zipkin/doc.go similarity index 100% rename from vendor/github.com/knative/pkg/test/zipkin/doc.go rename to vendor/knative.dev/pkg/test/zipkin/doc.go diff --git a/vendor/github.com/knative/pkg/test/zipkin/util.go b/vendor/knative.dev/pkg/test/zipkin/util.go similarity index 98% rename from vendor/github.com/knative/pkg/test/zipkin/util.go rename to vendor/knative.dev/pkg/test/zipkin/util.go index add07498f..9196db9f2 100644 --- a/vendor/github.com/knative/pkg/test/zipkin/util.go +++ b/vendor/knative.dev/pkg/test/zipkin/util.go @@ -25,10 +25,10 @@ import ( "net/http" "sync" - "github.com/knative/pkg/test/logging" - "github.com/knative/pkg/test/monitoring" "go.opencensus.io/trace" "k8s.io/client-go/kubernetes" + "knative.dev/pkg/test/logging" + "knative.dev/pkg/test/monitoring" ) const ( diff --git a/vendor/knative.dev/pkg/tracker/doc.go b/vendor/knative.dev/pkg/tracker/doc.go new file mode 100644 index 000000000..a54e6affe --- /dev/null +++ b/vendor/knative.dev/pkg/tracker/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package tracker defines a utility to enable Reconcilers to trigger +// reconciliations when objects that are cross-referenced change, so +// that the level-based reconciliation can react to the change. The +// prototypical cross-reference in Kubernetes is corev1.ObjectReference. +package tracker diff --git a/vendor/knative.dev/pkg/tracker/enqueue.go b/vendor/knative.dev/pkg/tracker/enqueue.go new file mode 100644 index 000000000..278fbc0a1 --- /dev/null +++ b/vendor/knative.dev/pkg/tracker/enqueue.go @@ -0,0 +1,169 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package tracker + +import ( + "fmt" + "sort" + "strings" + "sync" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/client-go/tools/cache" + + "knative.dev/pkg/kmeta" +) + +// New returns an implementation of Interface that lets a Reconciler +// register a particular resource as watching an ObjectReference for +// a particular lease duration. This watch must be refreshed +// periodically (e.g. by a controller resync) or it will expire. +// +// When OnChanged is called by the informer for a particular +// GroupVersionKind, the provided callback is called with the "key" +// of each object actively watching the changed object. 
+func New(callback func(string), lease time.Duration) Interface { + return &impl{ + leaseDuration: lease, + cb: callback, + } +} + +type impl struct { + m sync.Mutex + // mapping maps from an object reference to the set of + // keys for objects watching it. + mapping map[corev1.ObjectReference]set + + // The amount of time that an object may watch another + // before having to renew the lease. + leaseDuration time.Duration + + cb func(string) +} + +// Check that impl implements Interface. +var _ Interface = (*impl)(nil) + +// set is a map from keys to expirations +type set map[string]time.Time + +// Track implements Interface. +func (i *impl) Track(ref corev1.ObjectReference, obj interface{}) error { + invalidFields := map[string][]string{ + "APIVersion": validation.IsQualifiedName(ref.APIVersion), + "Kind": validation.IsCIdentifier(ref.Kind), + "Namespace": validation.IsDNS1123Label(ref.Namespace), + "Name": validation.IsDNS1123Subdomain(ref.Name), + } + fieldErrors := []string{} + for k, v := range invalidFields { + for _, msg := range v { + fieldErrors = append(fieldErrors, fmt.Sprintf("%s: %s", k, msg)) + } + } + if len(fieldErrors) > 0 { + sort.Strings(fieldErrors) + return fmt.Errorf("Invalid ObjectReference:\n%s", strings.Join(fieldErrors, "\n")) + } + + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + if err != nil { + return err + } + + i.m.Lock() + defer i.m.Unlock() + if i.mapping == nil { + i.mapping = make(map[corev1.ObjectReference]set) + } + + l, ok := i.mapping[ref] + if !ok { + l = set{} + } + if expiry, ok := l[key]; !ok || isExpired(expiry) { + // When covering an uncovered key, immediately call the + // registered callback to ensure that the following pattern + // doesn't create problems: + // foo, err := lister.Get(key) + // // Later... + // err := tracker.Track(fooRef, parent) + // In this example, "Later" represents a window where "foo" may + // have changed or been created while the Track is not active. 
+ // The simplest way of eliminating such a window is to call the + // callback to "catch up" immediately following new + // registrations. + i.cb(key) + } + // Overwrite the key with a new expiration. + l[key] = time.Now().Add(i.leaseDuration) + + i.mapping[ref] = l + return nil +} + +func objectReference(item kmeta.Accessor) corev1.ObjectReference { + gvk := item.GroupVersionKind() + apiVersion, kind := gvk.ToAPIVersionAndKind() + return corev1.ObjectReference{ + APIVersion: apiVersion, + Kind: kind, + Namespace: item.GetNamespace(), + Name: item.GetName(), + } +} + +func isExpired(expiry time.Time) bool { + return time.Now().After(expiry) +} + +// OnChanged implements Interface. +func (i *impl) OnChanged(obj interface{}) { + item, err := kmeta.DeletionHandlingAccessor(obj) + if err != nil { + // TODO(mattmoor): We should consider logging here. + return + } + + or := objectReference(item) + + // TODO(mattmoor): Consider locking the mapping (global) for a + // smaller scope and leveraging a per-set lock to guard its access. + i.m.Lock() + defer i.m.Unlock() + s, ok := i.mapping[or] + if !ok { + // TODO(mattmoor): We should consider logging here. + return + } + + for key, expiry := range s { + // If the expiration has lapsed, then delete the key. + if isExpired(expiry) { + delete(s, key) + continue + } + i.cb(key) + } + + if len(s) == 0 { + delete(i.mapping, or) + } +} diff --git a/vendor/knative.dev/pkg/tracker/interface.go b/vendor/knative.dev/pkg/tracker/interface.go new file mode 100644 index 000000000..6481a839d --- /dev/null +++ b/vendor/knative.dev/pkg/tracker/interface.go @@ -0,0 +1,33 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package tracker + +import ( + corev1 "k8s.io/api/core/v1" +) + +// Interface defines the interface through which an object can register +// that it is tracking another object by reference. +type Interface interface { + // Track tells us that "obj" is tracking changes to the + // referenced object. + Track(ref corev1.ObjectReference, obj interface{}) error + + // OnChanged is a callback to register with the InformerFactory + // so that we are notified for appropriate object changes. + OnChanged(obj interface{}) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index f01db370e..e4a5d1c64 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -4,6 +4,8 @@ cloud.google.com/go/monitoring/apiv3 cloud.google.com/go/trace/apiv2 # contrib.go.opencensus.io/exporter/ocagent v0.2.0 contrib.go.opencensus.io/exporter/ocagent +# contrib.go.opencensus.io/exporter/prometheus v0.1.0 +contrib.go.opencensus.io/exporter/prometheus # contrib.go.opencensus.io/exporter/stackdriver v0.9.1 contrib.go.opencensus.io/exporter/stackdriver contrib.go.opencensus.io/exporter/stackdriver/monitoredresource @@ -85,23 +87,27 @@ github.com/gogo/protobuf/proto github.com/gogo/protobuf/sortkeys # github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/golang/glog +# github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef +github.com/golang/groupcache/lru # github.com/golang/protobuf v1.2.0 github.com/golang/protobuf/proto github.com/golang/protobuf/ptypes/any -github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/timestamp 
github.com/golang/protobuf/ptypes/wrappers +github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/struct github.com/golang/protobuf/ptypes/empty github.com/golang/protobuf/protoc-gen-go/descriptor # github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c github.com/google/btree -# github.com/google/go-cmp v0.2.0 +# github.com/google/go-cmp v0.3.1 github.com/google/go-cmp/cmp github.com/google/go-cmp/cmp/internal/diff +github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value +github.com/google/go-cmp/cmp/cmpopts # github.com/google/go-containerregistry v0.0.0-20190320210540-8d4083db9aa0 github.com/google/go-containerregistry/pkg/authn github.com/google/go-containerregistry/pkg/authn/k8schain @@ -115,6 +121,8 @@ github.com/google/go-containerregistry/pkg/v1/stream github.com/google/go-containerregistry/pkg/v1/v1util # github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367 github.com/google/gofuzz +# github.com/google/uuid v1.1.1 +github.com/google/uuid # github.com/googleapis/gax-go/v2 v2.0.4 github.com/googleapis/gax-go/v2 # github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d @@ -143,25 +151,6 @@ github.com/jonboulle/clockwork github.com/json-iterator/go # github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 github.com/kballard/go-shellquote -# github.com/knative/pkg v0.0.0-20190409220258-28cfa161499b -github.com/knative/pkg/apis -github.com/knative/pkg/apis/duck/v1beta1 -github.com/knative/pkg/apis/duck -github.com/knative/pkg/configmap -github.com/knative/pkg/controller -github.com/knative/pkg/test -github.com/knative/pkg/test/logging -github.com/knative/pkg/kmp -github.com/knative/pkg/kmeta -github.com/knative/pkg/logging -github.com/knative/pkg/logging/logkey -github.com/knative/pkg/metrics -github.com/knative/pkg/test/spoof -github.com/knative/pkg/changeset 
-github.com/knative/pkg/metrics/metricskey -github.com/knative/pkg/test/ingress -github.com/knative/pkg/test/zipkin -github.com/knative/pkg/test/monitoring # github.com/kr/pty v1.1.8 => github.com/creack/pty v1.1.7 github.com/kr/pty # github.com/mattbaird/jsonpatch v0.0.0-20171005235357-81af80346b1a @@ -209,20 +198,18 @@ github.com/spf13/pflag # github.com/stretchr/testify v1.2.2 github.com/stretchr/testify/require github.com/stretchr/testify/assert -# github.com/tektoncd/pipeline v0.5.2 +# github.com/tektoncd/pipeline v0.6.0 github.com/tektoncd/pipeline/pkg/client/clientset/versioned github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1 github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/resources github.com/tektoncd/pipeline/pkg/client/informers/externalversions +github.com/tektoncd/pipeline/test github.com/tektoncd/pipeline/test/builder github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1 github.com/tektoncd/pipeline/pkg/apis/config github.com/tektoncd/pipeline/pkg/apis/pipeline github.com/tektoncd/pipeline/pkg/list -github.com/tektoncd/pipeline/pkg/merge github.com/tektoncd/pipeline/pkg/names -github.com/tektoncd/pipeline/pkg/templating -github.com/tektoncd/pipeline/test github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/pipelinerun/resources github.com/tektoncd/pipeline/pkg/artifacts @@ -232,28 +219,48 @@ github.com/tektoncd/pipeline/pkg/credentials/gitcreds github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/entrypoint github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline -github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1 +github.com/tektoncd/pipeline/pkg/client/injection/client/fake 
+github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/clustertask/fake +github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/condition/fake +github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipeline/fake +github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipelineresource/fake +github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipelinerun/fake +github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/task/fake +github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/taskrun/fake +github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake github.com/tektoncd/pipeline/pkg/system github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1 +github.com/tektoncd/pipeline/pkg/client/injection/client +github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/factory/fake +github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/clustertask +github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/condition +github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipeline +github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipelineresource +github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipelinerun +github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/task +github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/taskrun +github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/factory # github.com/tektoncd/plumbing v0.0.0-20190604151109-373083123d6a github.com/tektoncd/plumbing/scripts -# go.opencensus.io v0.19.2 +# go.opencensus.io v0.21.0 go.opencensus.io/trace -go.opencensus.io/stats 
-go.opencensus.io/stats/view -go.opencensus.io/tag -go.opencensus.io/exemplar go.opencensus.io/internal go.opencensus.io/trace/internal go.opencensus.io/trace/tracestate -go.opencensus.io/exporter/prometheus +go.opencensus.io/stats +go.opencensus.io/stats/view +go.opencensus.io/tag +go.opencensus.io +go.opencensus.io/metric/metricdata go.opencensus.io/stats/internal go.opencensus.io/internal/tagencoding +go.opencensus.io/metric/metricproducer go.opencensus.io/plugin/ochttp go.opencensus.io/plugin/ochttp/propagation/b3 -go.opencensus.io +go.opencensus.io/resource +go.opencensus.io/metric/metricexport go.opencensus.io/trace/propagation go.opencensus.io/plugin/ochttp/propagation/tracecontext go.opencensus.io/plugin/ocgrpc @@ -266,9 +273,11 @@ go.uber.org/zap go.uber.org/zap/zaptest/observer go.uber.org/zap/internal/bufferpool go.uber.org/zap/zapcore +go.uber.org/zap/zaptest go.uber.org/zap/buffer go.uber.org/zap/internal/color go.uber.org/zap/internal/exit +go.uber.org/zap/internal/ztest # golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5 golang.org/x/crypto/ssh/terminal golang.org/x/crypto/pkcs12 @@ -383,7 +392,6 @@ gopkg.in/inf.v0 gopkg.in/yaml.v2 # k8s.io/api v0.0.0-20190226173710-145d52631d00 k8s.io/api/core/v1 -k8s.io/api/authentication/v1 k8s.io/api/admissionregistration/v1alpha1 k8s.io/api/admissionregistration/v1beta1 k8s.io/api/apps/v1 @@ -410,6 +418,7 @@ k8s.io/api/settings/v1alpha1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 +k8s.io/api/authentication/v1 k8s.io/api/authentication/v1beta1 k8s.io/api/authorization/v1 k8s.io/api/authorization/v1beta1 @@ -428,6 +437,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/pkg/util/errors k8s.io/apimachinery/pkg/util/validation k8s.io/apimachinery/pkg/api/equality +k8s.io/apimachinery/pkg/util/strategicpatch k8s.io/apimachinery/pkg/api/resource k8s.io/apimachinery/pkg/conversion k8s.io/apimachinery/pkg/labels @@ -437,7 +447,6 @@ k8s.io/apimachinery/pkg/util/runtime 
k8s.io/apimachinery/pkg/api/meta k8s.io/apimachinery/pkg/util/json k8s.io/apimachinery/pkg/runtime/serializer -k8s.io/apimachinery/pkg/api/validation k8s.io/apimachinery/pkg/util/cache k8s.io/apimachinery/pkg/util/clock k8s.io/apimachinery/pkg/util/diff @@ -448,19 +457,19 @@ k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/util/validation/field k8s.io/apimachinery/pkg/runtime/serializer/json k8s.io/apimachinery/pkg/runtime/serializer/versioning -k8s.io/apimachinery/pkg/util/strategicpatch k8s.io/apimachinery/pkg/util/rand +k8s.io/apimachinery/pkg/apis/meta/v1/unstructured +k8s.io/apimachinery/pkg/util/mergepatch +k8s.io/apimachinery/third_party/forked/golang/json +k8s.io/apimachinery/pkg/api/validation k8s.io/apimachinery/third_party/forked/golang/reflect k8s.io/apimachinery/pkg/apis/meta/v1beta1 -k8s.io/apimachinery/pkg/apis/meta/v1/unstructured k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme k8s.io/apimachinery/pkg/runtime/serializer/protobuf k8s.io/apimachinery/pkg/runtime/serializer/recognizer -k8s.io/apimachinery/pkg/apis/meta/v1/validation k8s.io/apimachinery/pkg/apis/meta/internalversion k8s.io/apimachinery/pkg/util/framer -k8s.io/apimachinery/pkg/util/mergepatch -k8s.io/apimachinery/third_party/forked/golang/json +k8s.io/apimachinery/pkg/apis/meta/v1/validation # k8s.io/cli-runtime v0.0.0-20190325194458-f2b4781c3ae1 k8s.io/cli-runtime/pkg/genericclioptions k8s.io/cli-runtime/pkg/genericclioptions/printers @@ -541,6 +550,8 @@ k8s.io/client-go/informers/settings k8s.io/client-go/informers/storage k8s.io/client-go/kubernetes/scheme k8s.io/client-go/tools/reference +k8s.io/client-go/informers/core/v1 +k8s.io/client-go/tools/record k8s.io/client-go/third_party/forked/golang/template k8s.io/client-go/util/integer k8s.io/client-go/pkg/apis/clientauthentication @@ -548,7 +559,6 @@ k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1 k8s.io/client-go/pkg/apis/clientauthentication/v1beta1 k8s.io/client-go/util/connrotation 
k8s.io/client-go/tools/clientcmd/api/v1 -k8s.io/client-go/informers/core/v1 k8s.io/client-go/discovery/fake k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake @@ -581,6 +591,7 @@ k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake k8s.io/client-go/kubernetes/typed/storage/v1/fake k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake +k8s.io/client-go/dynamic k8s.io/client-go/informers/admissionregistration/v1alpha1 k8s.io/client-go/informers/admissionregistration/v1beta1 k8s.io/client-go/informers/apps/v1 @@ -607,9 +618,8 @@ k8s.io/client-go/informers/settings/v1alpha1 k8s.io/client-go/informers/storage/v1 k8s.io/client-go/informers/storage/v1alpha1 k8s.io/client-go/informers/storage/v1beta1 -k8s.io/client-go/dynamic -k8s.io/client-go/util/workqueue k8s.io/client-go/listers/core/v1 +k8s.io/client-go/util/workqueue k8s.io/client-go/listers/admissionregistration/v1alpha1 k8s.io/client-go/listers/admissionregistration/v1beta1 k8s.io/client-go/listers/apps/v1 @@ -647,5 +657,36 @@ k8s.io/kubernetes/pkg/credentialprovider/azure k8s.io/kubernetes/pkg/credentialprovider/gcp k8s.io/kubernetes/pkg/credentialprovider/secrets k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth +# knative.dev/pkg v0.0.0-20190719141030-e4bc08cc8ded +knative.dev/pkg/apis/duck/v1beta1 +knative.dev/pkg/reconciler/testing +knative.dev/pkg/apis +knative.dev/pkg/apis/duck +knative.dev/pkg/controller +knative.dev/pkg/injection/clients/kubeclient/fake +knative.dev/pkg/injection/informers/kubeinformers/corev1/pod/fake +knative.dev/pkg/test +knative.dev/pkg/test/logging +knative.dev/pkg/injection +knative.dev/pkg/kmeta +knative.dev/pkg/logging/testing +knative.dev/pkg/system/testing +knative.dev/pkg/tracker +knative.dev/pkg/configmap +knative.dev/pkg/kmp +knative.dev/pkg/logging +knative.dev/pkg/logging/logkey +knative.dev/pkg/metrics 
+knative.dev/pkg/injection/clients/kubeclient +knative.dev/pkg/injection/informers/kubeinformers/corev1/pod +knative.dev/pkg/injection/informers/kubeinformers/factory/fake +knative.dev/pkg/test/spoof +knative.dev/pkg/system +knative.dev/pkg/changeset +knative.dev/pkg/metrics/metricskey +knative.dev/pkg/injection/informers/kubeinformers/factory +knative.dev/pkg/test/ingress +knative.dev/pkg/test/zipkin +knative.dev/pkg/test/monitoring # sigs.k8s.io/yaml v1.1.0 sigs.k8s.io/yaml From 7e0a5009ee82c0fcad81de2521b543e448537493 Mon Sep 17 00:00:00 2001 From: hriships Date: Tue, 20 Aug 2019 15:44:41 +0530 Subject: [PATCH 2/2] fixes pipelinerun describe test --- pkg/cmd/pipelineresource/delete_test.go | 9 ++++----- pkg/cmd/pipelinerun/describe_test.go | 17 +++++++---------- 2 files changed, 11 insertions(+), 15 deletions(-) diff --git a/pkg/cmd/pipelineresource/delete_test.go b/pkg/cmd/pipelineresource/delete_test.go index f8ba83e80..826fc8d38 100644 --- a/pkg/cmd/pipelineresource/delete_test.go +++ b/pkg/cmd/pipelineresource/delete_test.go @@ -18,14 +18,13 @@ import ( "testing" "github.com/tektoncd/cli/pkg/test" - tu "github.com/tektoncd/cli/pkg/test" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" pipelinetest "github.com/tektoncd/pipeline/test" tb "github.com/tektoncd/pipeline/test/builder" ) func TestPipelineResourceDelete_Empty(t *testing.T) { - cs, _ := pipelinetest.SeedTestData(t, pipelinetest.Data{}) + cs, _ := test.SeedTestData(t, pipelinetest.Data{}) p := &test.Params{Tekton: cs.Pipeline} res := Command(p) @@ -34,7 +33,7 @@ func TestPipelineResourceDelete_Empty(t *testing.T) { t.Errorf("Error expected here") } expected := "Failed to delete pipelineresource \"bar\": pipelineresources.tekton.dev \"bar\" not found" - tu.AssertOutput(t, expected, err.Error()) + test.AssertOutput(t, expected, err.Error()) } func TestPipelineResourceDelete_WithParams(t *testing.T) { @@ -46,10 +45,10 @@ func TestPipelineResourceDelete_WithParams(t *testing.T) { ), } - cs, _ := 
pipelinetest.SeedTestData(t, pipelinetest.Data{PipelineResources: pres}) + cs, _ := test.SeedTestData(t, pipelinetest.Data{PipelineResources: pres}) p := &test.Params{Tekton: cs.Pipeline} pipelineresource := Command(p) out, _ := test.ExecuteCommand(pipelineresource, "rm", "test-1", "-n", "test-ns-1") expected := "PipelineResource deleted: test-1\n" - tu.AssertOutput(t, expected, out) + test.AssertOutput(t, expected, out) } diff --git a/pkg/cmd/pipelinerun/describe_test.go b/pkg/cmd/pipelinerun/describe_test.go index ed694aff8..a94f257c0 100644 --- a/pkg/cmd/pipelinerun/describe_test.go +++ b/pkg/cmd/pipelinerun/describe_test.go @@ -18,7 +18,6 @@ import ( "testing" "time" - "github.com/google/go-cmp/cmp" "github.com/jonboulle/clockwork" "github.com/tektoncd/cli/pkg/test" cb "github.com/tektoncd/cli/pkg/test/builder" @@ -150,7 +149,7 @@ func TestPipelineRunDescribe_multiple_taskrun_ordering(t *testing.T) { }), tb.PipelineRunTaskRunsStatus("tr-2", &v1alpha1.PipelineRunTaskRunStatus{ PipelineTaskName: "t-2", - Status: &trs[0].Status, + Status: &trs[1].Status, }), tb.PipelineRunStatusCondition(apis.Condition{ Status: corev1.ConditionTrue, @@ -165,7 +164,7 @@ func TestPipelineRunDescribe_multiple_taskrun_ordering(t *testing.T) { p := &test.Params{Tekton: cs.Pipeline, Clock: clock} pipelinerun := Command(p) - clock.Advance(9 * time.Minute) + clock.Advance(10 * time.Minute) actual, err := test.ExecuteCommand(pipelinerun, "desc", "pipeline-run", "-n", "ns") if err != nil { t.Errorf("Unexpected error: %v", err) @@ -175,8 +174,8 @@ Namespace: ns Pipeline Ref: pipeline Status -STARTED DURATION STATUS -9 minutes ago 15 minutes Succeeded +STARTED DURATION STATUS +10 minutes ago 15 minutes Succeeded Resources No resources @@ -186,12 +185,10 @@ No params Taskruns NAME TASK NAME STARTED DURATION STATUS -tr-2 t-2 7 minutes ago 3 minutes Succeeded -tr-1 t-1 7 minutes ago 3 minutes Succeeded +tr-2 t-2 5 minutes ago 4 minutes Succeeded +tr-1 t-1 8 minutes ago 3 minutes Succeeded ` - if 
d := cmp.Diff(expected, actual); d != "" { - t.Errorf("Unexpected output mismatch: %s", d) - } + test.AssertOutput(t, expected, actual) }