diff --git a/Gopkg.lock b/Gopkg.lock index 6bb82e35..10ece67a 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -112,6 +112,24 @@ revision = "3af367b6b30c263d47e8895973edcca9a49cf029" version = "v0.2.0" +[[projects]] + branch = "master" + digest = "1:900b120797732528f188d4cb7485d0a33f40e2867fa7f9086494966ea2203bac" + name = "github.com/google/go-containerregistry" + packages = [ + "pkg/authn", + "pkg/name", + "pkg/v1", + "pkg/v1/partial", + "pkg/v1/remote", + "pkg/v1/remote/transport", + "pkg/v1/stream", + "pkg/v1/types", + "pkg/v1/v1util", + ] + pruneopts = "NUT" + revision = "63fc47df3f2d99a254e8bea78182bb59b3a6ff69" + [[projects]] digest = "1:f9425215dccf1c63f659ec781ca46bc81804341821d0cd8d2459c5b58f8bd067" name = "github.com/google/gofuzz" @@ -258,6 +276,14 @@ pruneopts = "UT" revision = "8a63be3b3795aa2f7ae9be32d840e096ba877c78" +[[projects]] + digest = "1:4059c14e87a2de3a434430340521b5feece186c1469eff0834c29a63870de3ed" + name = "github.com/konsorten/go-windows-terminal-sequences" + packages = ["."] + pruneopts = "NUT" + revision = "5c8c8bd35d3832f5d134ae1e1e375b69a4d25242" + version = "v1.0.1" + [[projects]] digest = "1:56dbf15e091bf7926cb33a57cb6bdfc658fc6d3498d2f76f10a97ce7856f1fde" name = "github.com/markbates/inflect" @@ -306,6 +332,22 @@ revision = "5f041e8faa004a95c88a202771f4cc3e991971e6" version = "v2.0.1" +[[projects]] + digest = "1:d917313f309bda80d27274d53985bc65651f81a5b66b820749ac7f8ef061fd04" + name = "github.com/sergi/go-diff" + packages = ["diffmatchpatch"] + pruneopts = "NUT" + revision = "1744e2970ca51c86172c8190fadad617561ed6e7" + version = "v1.0.0" + +[[projects]] + digest = "1:d848e2bdc690ea54c4b49894b67a05db318a97ee6561879b814c2c1f82f61406" + name = "github.com/sirupsen/logrus" + packages = ["."] + pruneopts = "NUT" + revision = "bcd833dfe83d3cebad139e4a29ed79cb2318bf95" + version = "v1.2.0" + [[projects]] digest = "1:e3707aeaccd2adc89eba6c062fec72116fe1fc1ba71097da85b4d8ae1668a675" name = "github.com/spf13/pflag" @@ -805,12 +847,23 @@ pruneopts = "NUT" revision = "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" +[[projects]] + branch = "master" + digest = "1:3e102531435339cd5e7b4cfe35738679b77565492c01b25feba3e67ca3e9411b" + name = "k8s.io/test-infra" + packages = ["prow/logrusutil"] + pruneopts = "NUT" + revision = "bfbc61258394e31ce8da16e204cedd5b19763a3a" + [solve-meta] analyzer-name = "dep" analyzer-version = 1 input-imports = [ "github.com/google/go-cmp/cmp", "github.com/google/go-cmp/cmp/cmpopts", + "github.com/google/go-containerregistry/pkg/authn", + "github.com/google/go-containerregistry/pkg/name", + "github.com/google/go-containerregistry/pkg/v1/remote", "github.com/knative/caching/pkg/apis/caching", "github.com/knative/caching/pkg/apis/caching/v1alpha1", "github.com/knative/caching/pkg/client/clientset/versioned", @@ -831,8 +884,10 @@ "github.com/knative/pkg/webhook", "github.com/knative/test-infra/scripts", "github.com/knative/test-infra/tools/dep-collector", + "github.com/sirupsen/logrus", "go.opencensus.io/trace", "go.uber.org/zap", + "golang.org/x/oauth2/google", "golang.org/x/sync/errgroup", "k8s.io/api/core/v1", "k8s.io/apimachinery/pkg/api/errors", @@ -846,6 +901,7 @@ "k8s.io/apimachinery/pkg/types", "k8s.io/apimachinery/pkg/util/runtime", "k8s.io/apimachinery/pkg/util/sets/types", + "k8s.io/apimachinery/pkg/util/wait", "k8s.io/apimachinery/pkg/watch", "k8s.io/client-go/discovery", "k8s.io/client-go/discovery/fake", @@ -868,6 +924,7 @@ "k8s.io/code-generator/cmd/defaulter-gen", "k8s.io/code-generator/cmd/informer-gen", 
"k8s.io/code-generator/cmd/lister-gen", + "k8s.io/test-infra/prow/logrusutil", ] solver-name = "gps-cdcl" solver-version = 1 diff --git a/cmd/entrypoint/main.go b/cmd/entrypoint/main.go new file mode 100644 index 00000000..db0df30e --- /dev/null +++ b/cmd/entrypoint/main.go @@ -0,0 +1,43 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "os" + + "github.com/knative/build/pkg/entrypoint" + "github.com/knative/build/pkg/entrypoint/options" + "github.com/sirupsen/logrus" + "k8s.io/test-infra/prow/logrusutil" +) + +func main() { + o := entrypoint.NewOptions() + if err := options.Load(o); err != nil { + logrus.Fatalf("Could not resolve options: %v", err) + } + + if err := o.Validate(); err != nil { + logrus.Fatalf("Invalid options: %v", err) + } + + logrus.SetFormatter( + logrusutil.NewDefaultFieldsFormatter(nil, logrus.Fields{"component": "entrypoint"}), + ) + + os.Exit(o.Run()) +} diff --git a/config/999-cache.yaml b/config/999-cache.yaml index a530a578..e6022e57 100644 --- a/config/999-cache.yaml +++ b/config/999-cache.yaml @@ -24,6 +24,16 @@ spec: --- apiVersion: caching.internal.knative.dev/v1alpha1 kind: Image +metadata: + name: entrypoint + namespace: knative-build +spec: + # This is the Go import path for the binary that is containerized + # and substituted here. + image: github.com/knative/build/cmd/entrypoint +--- +apiVersion: caching.internal.knative.dev/v1alpha1 +kind: Image metadata: name: git-init namespace: knative-build diff --git a/pkg/entrypoint/doc.go b/pkg/entrypoint/doc.go new file mode 100644 index 00000000..66fd2298 --- /dev/null +++ b/pkg/entrypoint/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package entrypoint is a library that knows how to wrap +// a process and write it's output and exit code to disk +package entrypoint diff --git a/pkg/entrypoint/options.go b/pkg/entrypoint/options.go new file mode 100644 index 00000000..4890fc50 --- /dev/null +++ b/pkg/entrypoint/options.go @@ -0,0 +1,115 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package entrypoint
+
+import (
+	"encoding/json"
+	"errors"
+	"flag"
+	"time"
+
+	"github.com/knative/build/pkg/entrypoint/wrapper"
+)
+
+// NewOptions returns an empty Options with no nil fields
+func NewOptions() *Options {
+	return &Options{
+		Options: &wrapper.Options{},
+	}
+}
+
+// Options exposes the configuration necessary
+// for defining the process being wrapped and
+// where its output and exit code are recorded.
+type Options struct {
+	// Args is the process and args to run
+	Args []string `json:"args"`
+	// Timeout determines how long to wait before the
+	// entrypoint sends SIGINT to the process
+	Timeout time.Duration `json:"timeout"`
+	// GracePeriod determines how long to wait after
+	// sending SIGINT before the entrypoint sends
+	// SIGKILL.
+	GracePeriod time.Duration `json:"grace_period"`
+	// ArtifactDir is a directory where test processes can dump artifacts
+	// for upload to persistent storage (courtesy of sidecar).
+	// If specified, it is created by entrypoint before starting the test process.
+	// May be ignored if not using sidecar.
+	ArtifactDir string `json:"artifact_dir,omitempty"`
+
+	*wrapper.Options
+}
+
+// Validate ensures that the set of options are
+// self-consistent and valid
+func (o *Options) Validate() error {
+	if len(o.Args) == 0 {
+		return errors.New("no process to wrap specified")
+	}
+
+	return o.Options.Validate()
+}
+
+const (
+	// JSONConfigEnvVar is the environment variable that
+	// utilities expect to find a full JSON configuration
+	// in when run.
+	JSONConfigEnvVar = "ENTRYPOINT_OPTIONS"
+)
+
+// ConfigVar exposes the environment variable used
+// to store serialized configuration
+func (o *Options) ConfigVar() string {
+	return JSONConfigEnvVar
+}
+
+// LoadConfig loads options from serialized config
+func (o *Options) LoadConfig(config string) error {
+	return json.Unmarshal([]byte(config), o)
+}
+
+// AddFlags binds flags to options
+func (o *Options) AddFlags(flags *flag.FlagSet) {
+	flags.DurationVar(&o.Timeout, "timeout",
+		DefaultTimeout, "Timeout for the test command.")
+	flags.DurationVar(&o.GracePeriod, "grace-period",
+		DefaultGracePeriod, "Grace period after timeout for the test command.")
+	flags.StringVar(&o.ArtifactDir, "artifact-dir",
+		"", "directory where test artifacts should be placed for upload "+
+			"to persistent storage")
+	flags.BoolVar(&o.ShouldWaitForPrevStep, "should-wait-for-prev-step",
+		DefaultShouldWaitForPrevStep, "Whether to wait for the previous step to finish before running.")
+	flags.BoolVar(&o.ShouldRunPostRun, "should-run-post-run",
+		DefaultShouldRunPostRun, "Whether to write the post-run marker file after the process exits.")
+	flags.StringVar(&o.PreRunFile, "prerun-file",
+		DefaultPreRunFile, "The prerun file to wait for.")
+	flags.StringVar(&o.PostRunFile, "postrun-file",
+		DefaultPostRunFile, "The postrun file to write.")
+	o.Options.AddFlags(flags)
+}
+
+// Complete internalizes command line arguments
+func (o *Options) Complete(args []string) {
+	o.Args = args
+}
+
+// Encode will encode the set of options in the format that
+// is expected for the configuration environment variable
+func Encode(options Options) (string, error) {
+	encoded, err := json.Marshal(options)
+	return string(encoded), err
+}
diff --git a/pkg/entrypoint/options/doc.go b/pkg/entrypoint/options/doc.go
new file mode 100644
index 00000000..6a73f5ab
--- /dev/null
+++ b/pkg/entrypoint/options/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2017 The Knative Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package options abstracts the options loading +// flow for pod utilities +package options diff --git a/pkg/entrypoint/options/load.go b/pkg/entrypoint/options/load.go new file mode 100644 index 00000000..4592a5c7 --- /dev/null +++ b/pkg/entrypoint/options/load.go @@ -0,0 +1,50 @@ +/* +Copyright 2017 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "flag" + "fmt" + "os" +) + +// OptionLoader allows loading options from either the environment or flags. +type OptionLoader interface { + ConfigVar() string + LoadConfig(config string) error + AddFlags(flags *flag.FlagSet) + Complete(args []string) +} + +// Load loads the set of options, preferring to use +// JSON config from an env var, but falling back to +// command line flags if not possible. +func Load(loader OptionLoader) error { + if jsonConfig, provided := os.LookupEnv(loader.ConfigVar()); provided { + if err := loader.LoadConfig(jsonConfig); err != nil { + return fmt.Errorf("could not load config from JSON var %s: %v", loader.ConfigVar(), err) + } + return nil + } + + fs := flag.NewFlagSet(os.Args[0], flag.ExitOnError) + loader.AddFlags(fs) + fs.Parse(os.Args[1:]) + loader.Complete(fs.Args()) + + return nil +} diff --git a/pkg/entrypoint/options_test.go b/pkg/entrypoint/options_test.go new file mode 100644 index 00000000..ccb15700 --- /dev/null +++ b/pkg/entrypoint/options_test.go @@ -0,0 +1,63 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package entrypoint
+
+import (
+	"testing"
+
+	"github.com/knative/build/pkg/entrypoint/wrapper"
+)
+
+func TestOptions_Validate(t *testing.T) {
+	var testCases = []struct {
+		name        string
+		input       Options
+		expectedErr bool
+	}{
+		{
+			name: "all ok",
+			input: Options{
+				Args: []string{"/usr/bin/true"},
+				Options: &wrapper.Options{
+					ProcessLog: "output.txt",
+					MarkerFile: "marker.txt",
+				},
+			},
+			expectedErr: false,
+		},
+		{
+			name: "missing args",
+			input: Options{
+				Options: &wrapper.Options{
+					ProcessLog: "output.txt",
+					MarkerFile: "marker.txt",
+				},
+			},
+			expectedErr: true,
+		},
+	}
+
+	for _, testCase := range testCases {
+		err := testCase.input.Validate()
+		if testCase.expectedErr && err == nil {
+			t.Errorf("%s: expected an error but got none", testCase.name)
+		}
+		if !testCase.expectedErr && err != nil {
+			t.Errorf("%s: expected no error but got one: %v", testCase.name, err)
+		}
+	}
+}
diff --git a/pkg/entrypoint/run.go b/pkg/entrypoint/run.go
new file mode 100644
index 00000000..cdaa4d02
--- /dev/null
+++ b/pkg/entrypoint/run.go
@@ -0,0 +1,295 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package entrypoint
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"os/signal"
+	"path/filepath"
+	"strconv"
+	"syscall"
+	"time"
+
+	"github.com/sirupsen/logrus"
+)
+
+const (
+	// InternalErrorCode is what we write to the marker file to
+	// indicate that we failed to start the wrapped command
+	InternalErrorCode = 127
+	// AbortedErrorCode is what we write to the marker file to
+	// indicate that we were terminated via a signal.
+	AbortedErrorCode = 130
+
+	// DefaultTimeout is the default timeout for the test
+	// process before SIGINT is sent
+	DefaultTimeout = 120 * time.Minute
+
+	// DefaultGracePeriod is the default timeout for the test
+	// process after SIGINT is sent before SIGKILL is sent
+	DefaultGracePeriod = 15 * time.Second
+
+	// DefaultShouldWaitForPrevStep is the default for whether the
+	// entrypoint blocks on the previous step's marker file before
+	// starting the wrapped process
+	DefaultShouldWaitForPrevStep = false
+
+	// DefaultShouldRunPostRun is the default for whether the
+	// entrypoint writes a post-run marker file once the wrapped
+	// process has exited
+	DefaultShouldRunPostRun = false
+
+	// DefaultPreRunFile is the default name of the marker file the
+	// entrypoint waits for before running
+	DefaultPreRunFile = "0"
+
+	// DefaultPostRunFile is the default name of the marker file the
+	// entrypoint writes after running
+	DefaultPostRunFile = "1"
+)
+
+var (
+	// errTimedOut is used as the command's error when the command
+	// is terminated after the timeout is reached
+	errTimedOut = errors.New("process timed out")
+	// errAborted is used as the command's error when the command
+	// is shut down by an external signal
+	errAborted = errors.New("process aborted")
+)
+
+// Run executes the test process then writes the exit code to the marker file.
+// This function returns the status code that should be passed to os.Exit().
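+//
+// As a concrete illustration of the contract (hypothetical values, not
+// taken from a real build): wrapping `sh -c "exit 12"` leaves "12" in the
+// marker file and Run returns 12, while a process that cannot be started
+// at all records InternalErrorCode (127) instead.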
+func (o Options) Run() int { + code, err := o.ExecuteProcess() + if err != nil { + logrus.WithError(err).Error("Error executing test process") + } + if err := o.mark(code); err != nil { + logrus.WithError(err).Error("Error writing exit code to marker file") + return InternalErrorCode + } + return code +} + +// ExecuteProcess creates the artifact directory then executes the process as +// configured, writing the output to the process log. +func (o Options) ExecuteProcess() (int, error) { + if o.ArtifactDir != "" { + if err := os.MkdirAll(o.ArtifactDir, os.ModePerm); err != nil { + return InternalErrorCode, fmt.Errorf("could not create artifact directory(%s): %v", o.ArtifactDir, err) + } + } + processLogFile, err := os.Create(o.ProcessLog) + if err != nil { + return InternalErrorCode, fmt.Errorf("could not create process logfile(%s): %v", o.ProcessLog, err) + } + defer processLogFile.Close() + + output := io.MultiWriter(os.Stdout, processLogFile) + logrus.SetOutput(output) + defer logrus.SetOutput(os.Stdout) + + // --- + timeout := optionOrDefault(o.Timeout, DefaultTimeout) + gracePeriod := optionOrDefault(o.GracePeriod, DefaultGracePeriod) + var commandErr error + cancelled, aborted := false, false + + done := make(chan error) + go func() { + done <- o.waitForPrevStep() + }() + select { + case err := <-done: + commandErr = err + if err != nil { + cancelled = true + } + case <-time.After(timeout): + logrus.Errorf("Previous step did not finish within %s timeout", timeout) + cancelled = true + } + + var returnCode int + if cancelled { + if aborted { + commandErr = errAborted + returnCode = AbortedErrorCode + } else { + commandErr = errTimedOut + returnCode = InternalErrorCode + } + return returnCode, commandErr + } + + // --- + executable := o.Args[0] + var arguments []string + if len(o.Args) > 1 { + arguments = o.Args[1:] + } + command := exec.Command(executable, arguments...) 
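+	// Wire the wrapped process's stdout and stderr into the combined
+	// writer built above, so its output lands both on our stdout and in
+	// the process log file.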
+ command.Stderr = output + command.Stdout = output + if err := command.Start(); err != nil { + return InternalErrorCode, fmt.Errorf("could not start the process: %v", err) + } + + // if we get asked to terminate we need to forward + // that to the wrapped process as if it timed out + interrupt := make(chan os.Signal, 1) + signal.Notify(interrupt, os.Interrupt, syscall.SIGTERM) + + done = make(chan error) + go func() { + done <- command.Wait() + }() + select { + case err := <-done: + commandErr = err + if o.ShouldRunPostRun { + o.postRunWriteFile(0) + } + + case <-time.After(timeout): + logrus.Errorf("Process did not finish before %s timeout", timeout) + cancelled = true + gracefullyTerminate(command, done, gracePeriod) + case s := <-interrupt: + logrus.Errorf("Entrypoint received interrupt: %v", s) + cancelled = true + aborted = true + gracefullyTerminate(command, done, gracePeriod) + } + + if cancelled { + if aborted { + commandErr = errAborted + returnCode = AbortedErrorCode + } else { + commandErr = errTimedOut + returnCode = InternalErrorCode + } + } else { + if status, ok := command.ProcessState.Sys().(syscall.WaitStatus); ok { + returnCode = status.ExitStatus() + } else if commandErr == nil { + returnCode = 0 + } else { + returnCode = 1 + } + + if returnCode != 0 { + commandErr = fmt.Errorf("wrapped process failed: %v", commandErr) + } + } + return returnCode, commandErr +} + +func (o *Options) mark(exitCode int) error { + content := []byte(strconv.Itoa(exitCode)) + + // create temp file in the same directory as the desired marker file + dir := filepath.Dir(o.MarkerFile) + tempFile, err := ioutil.TempFile(dir, "temp-marker") + if err != nil { + return fmt.Errorf("could not create temp marker file in %s: %v", dir, err) + } + // write the exit code to the tempfile, sync to disk and close + if _, err = tempFile.Write(content); err != nil { + return fmt.Errorf("could not write to temp marker file (%s): %v", tempFile.Name(), err) + } + if err = tempFile.Sync(); err != nil { + return fmt.Errorf("could not sync temp marker file (%s): %v", tempFile.Name(), err) + } + tempFile.Close() + // set desired permission bits, then rename to the desired file name + if err = os.Chmod(tempFile.Name(), os.ModePerm); err != nil { + return fmt.Errorf("could not chmod (%x) temp marker file (%s): %v", os.ModePerm, tempFile.Name(), err) + } + if err := os.Rename(tempFile.Name(), o.MarkerFile); err != nil { + return fmt.Errorf("could not move marker file to destination path (%s): %v", o.MarkerFile, err) + } + return nil +} + +// optionOrDefault defaults to a value if option +// is the zero value +func optionOrDefault(option, defaultValue time.Duration) time.Duration { + if option == 0 { + return defaultValue + } + + return option +} + +func (o *Options) waitForPrevStep() error { + // wait for a file to exist that the last step wrote in a mounted shared dir + if o.ShouldWaitForPrevStep { + for { + _, err := os.Stat(o.PreRunFile) + if err == nil { + break + } else if !os.IsNotExist(err) { + return err + } + } + } + return nil +} + +func (o *Options) postRunWriteFile(exitCode int) error { + content := []byte(strconv.Itoa(exitCode)) + + // create temp file in the same directory as the desired marker file + dir := filepath.Dir(o.PostRunFile) + tempFile, err := ioutil.TempFile(dir, "temp-marker") + if err != nil { + return fmt.Errorf("could not create temp marker file in %s: %v", dir, err) + } + // write the exit code to the tempfile, sync to disk and close + if _, err = tempFile.Write(content); err != nil { + 
return fmt.Errorf("could not write to temp marker file (%s): %v", tempFile.Name(), err) + } + if err = tempFile.Sync(); err != nil { + return fmt.Errorf("could not sync temp marker file (%s): %v", tempFile.Name(), err) + } + tempFile.Close() + // set desired permission bits, then rename to the desired file name + if err = os.Chmod(tempFile.Name(), os.ModePerm); err != nil { + return fmt.Errorf("could not chmod (%x) temp marker file (%s): %v", os.ModePerm, tempFile.Name(), err) + } + if err := os.Rename(tempFile.Name(), o.PostRunFile); err != nil { + return fmt.Errorf("could not move marker file to destination path (%s): %v", o.PostRunFile, err) + } + return nil +} + +func gracefullyTerminate(command *exec.Cmd, done <-chan error, gracePeriod time.Duration) { + if err := command.Process.Signal(os.Interrupt); err != nil { + logrus.WithError(err).Error("Could not interrupt process after timeout") + } + select { + case <-done: + logrus.Errorf("Process gracefully exited before %s grace period", gracePeriod) + // but we ignore the output error as we will want errTimedOut + case <-time.After(gracePeriod): + logrus.Errorf("Process did not exit before %s grace period", gracePeriod) + if err := command.Process.Kill(); err != nil { + logrus.WithError(err).Error("Could not kill process after grace period") + } + } +} diff --git a/pkg/entrypoint/run_test.go b/pkg/entrypoint/run_test.go new file mode 100644 index 00000000..3c99d052 --- /dev/null +++ b/pkg/entrypoint/run_test.go @@ -0,0 +1,131 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package entrypoint + +// func TestOptions_Run(t *testing.T) { +// var testCases = []struct { +// name string +// args []string +// timeout time.Duration +// gracePeriod time.Duration +// expectedLog string +// expectedMarker string +// expectedShouldWaitForPrevStep bool +// expectedPreRunFile string +// expectedPostRunFile string +// expectedShouldRunPostRun bool +// }{ +// { +// name: "successful command", +// args: []string{"sh", "-c", "exit 0"}, +// expectedLog: "", +// expectedMarker: "0", +// expectedShouldRunPostRun: true, +// expectedPostRunFile: "0", +// }, +// { +// name: "successful command with output", +// args: []string{"echo", "test"}, +// expectedLog: "test\n", +// expectedMarker: "0", +// }, +// { +// name: "unsuccessful command", +// args: []string{"sh", "-c", "exit 12"}, +// expectedLog: "", +// expectedMarker: "12", +// }, +// { +// name: "unsuccessful command with output", +// args: []string{"sh", "-c", "echo test && exit 12"}, +// expectedLog: "test\n", +// expectedMarker: "12", +// }, +// { +// name: "command times out", +// args: []string{"sleep", "10"}, +// timeout: 1 * time.Second, +// gracePeriod: 1 * time.Second, +// expectedLog: "level=error msg=\"Process did not finish before 1s timeout\" \nlevel=error msg=\"Process gracefully exited before 1s grace period\" \n", +// expectedMarker: strconv.Itoa(InternalErrorCode), +// }, +// { +// name: "command times out and ignores interrupt", +// args: []string{"bash", "-c", "trap 'sleep 10' EXIT; sleep 10"}, +// timeout: 1 * time.Second, +// gracePeriod: 1 * time.Second, +// expectedLog: "level=error msg=\"Process did not finish before 1s timeout\" \nlevel=error msg=\"Process did not exit before 1s grace period\" \n", +// expectedMarker: strconv.Itoa(InternalErrorCode), +// }, +// } + +// // we write logs to the process log if wrapping fails +// // and cannot write timestamps or we can't match text +// logrus.SetFormatter(&logrus.TextFormatter{DisableTimestamp: true}) + +// for _, testCase := range testCases { +// t.Run(testCase.name, func(t *testing.T) { +// tmpDir, err := ioutil.TempDir("", testCase.name) +// if err != nil { +// t.Errorf("%s: error creating temp dir: %v", testCase.name, err) +// } +// defer func() { +// if err := os.RemoveAll(tmpDir); err != nil { +// t.Errorf("%s: error cleaning up temp dir: %v", testCase.name, err) +// } +// }() + +// options := Options{ +// Args: testCase.args, +// Timeout: testCase.timeout, +// GracePeriod: testCase.gracePeriod, +// Options: &wrapper.Options{ +// ProcessLog: path.Join(tmpDir, "process-log.txt"), +// MarkerFile: path.Join(tmpDir, "marker-file.txt"), +// ShouldWaitForPrevStep: false, +// PreRunFile: path.Join(tmpDir, "0"), +// PostRunFile: path.Join(tmpDir, "0"), +// }, +// } + +// if code := strconv.Itoa(options.Run()); code != testCase.expectedMarker { +// t.Errorf("%s: exit code %q does not match expected marker file contents %q", testCase.name, code, testCase.expectedMarker) +// } + +// compareFileContents(testCase.name, options.ProcessLog, testCase.expectedLog, t) +// compareFileContents(testCase.name, options.MarkerFile, testCase.expectedMarker, t) +// if options.ShouldWaitForPrevStep { +// compareFileContents(testCase.name, options.PreRunFile, +// testCase.expectedPreRunFile, t) +// } +// if options.ShouldRunPostRun { +// compareFileContents(testCase.name, options.PostRunFile, +// testCase.expectedPostRunFile, t) +// } +// }) +// } +// } + +// func compareFileContents(name, file, expected string, t *testing.T) { +// data, err := ioutil.ReadFile(file) +// 
if err != nil {
// 		t.Fatalf("%s: could not read file: %v", name, err)
// 	}
// 	if string(data) != expected {
// 		t.Errorf("%s: expected contents: %q, got %q", name, expected, data)
// 	}
// }
diff --git a/pkg/entrypoint/wrapper/doc.go b/pkg/entrypoint/wrapper/doc.go
new file mode 100644
index 00000000..07e77be1
--- /dev/null
+++ b/pkg/entrypoint/wrapper/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2018 The Knative Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package wrapper contains utilities for the processes that
+// wrap a step's execution in a build's step container
+package wrapper
diff --git a/pkg/entrypoint/wrapper/options.go b/pkg/entrypoint/wrapper/options.go
new file mode 100644
index 00000000..c2d38669
--- /dev/null
+++ b/pkg/entrypoint/wrapper/options.go
@@ -0,0 +1,76 @@
+/*
+Copyright 2018 The Knative Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package wrapper
+
+import (
+	"errors"
+	"flag"
+)
+
+// Options exposes the configuration options
+// used when wrapping test execution
+type Options struct {
+	// ProcessLog will contain std{out,err} from the
+	// wrapped test process
+	ProcessLog string `json:"process_log"`
+
+	// MarkerFile will be written with the exit code
+	// of the test process or an internal error code
+	// if the entrypoint fails.
+	MarkerFile string `json:"marker_file"`
+
+	// ShouldWaitForPrevStep indicates whether the entrypoint
+	// should block until the previous step's PreRunFile exists
+	// before starting the wrapped process.
+	ShouldWaitForPrevStep bool `json:"shouldWaitForPrevStep"`
+
+	// PreRunFile is the file whose existence signals that the
+	// previous step has finished.
+	PreRunFile string `json:"preRunFile"`
+
+	// ShouldRunPostRun indicates whether the entrypoint should
+	// write PostRunFile once the wrapped process has exited.
+	ShouldRunPostRun bool `json:"shouldRunPostRun"`
+
+	// PostRunFile is the file written after the wrapped process
+	// exits, signaling to the next step that it may start.
+	PostRunFile string `json:"postRunFile"`
+}
+
+// AddFlags adds flags to the FlagSet that populate
+// the wrapper options struct provided.
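+//
+// Note that only --process-log and --marker-file are bound here; the step
+// coordination fields (ShouldWaitForPrevStep, PreRunFile, ShouldRunPostRun,
+// PostRunFile) are bound by the entrypoint package's AddFlags, which calls
+// down into this one.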
+func (o *Options) AddFlags(fs *flag.FlagSet) { + fs.StringVar(&o.ProcessLog, "process-log", "", "path to the log where stdout and stderr are streamed for the process we execute") + fs.StringVar(&o.MarkerFile, "marker-file", "", "file we write the return code of the process we execute once it has finished running") +} + +// Validate ensures that the set of options are +// self-consistent and valid +func (o *Options) Validate() error { + if o.ProcessLog == "" { + return errors.New("no log file specified with --process-log") + } + + if o.MarkerFile == "" { + return errors.New("no marker file specified with --marker-file") + } + + return nil +} diff --git a/pkg/entrypoint/wrapper/options_test.go b/pkg/entrypoint/wrapper/options_test.go new file mode 100644 index 00000000..d15524f2 --- /dev/null +++ b/pkg/entrypoint/wrapper/options_test.go @@ -0,0 +1,62 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wrapper + +import ( + "testing" +) + +func TestOptions_Validate(t *testing.T) { + var testCases = []struct { + name string + input Options + expectedErr bool + }{ + { + name: "all ok", + input: Options{ + ProcessLog: "output.txt", + MarkerFile: "marker.txt", + }, + expectedErr: false, + }, + { + name: "no process log", + input: Options{ + MarkerFile: "marker.txt", + }, + expectedErr: true, + }, + { + name: "no marker file", + input: Options{ + ProcessLog: "output.txt", + }, + expectedErr: true, + }, + } + + for _, testCase := range testCases { + err := testCase.input.Validate() + if testCase.expectedErr && err == nil { + t.Errorf("%s: expected an error but got none", testCase.name) + } + if !testCase.expectedErr && err != nil { + t.Errorf("%s: expected no error but got one: %v", testCase.name, err) + } + } +} diff --git a/pkg/reconciler/build/resources/pod.go b/pkg/reconciler/build/resources/pod.go index 20e50407..7c842013 100644 --- a/pkg/reconciler/build/resources/pod.go +++ b/pkg/reconciler/build/resources/pod.go @@ -19,27 +19,40 @@ limitations under the License. 
package resources import ( + "context" "crypto/rand" + "encoding/base64" "encoding/hex" "encoding/json" "flag" "fmt" "io" "io/ioutil" + mrand "math/rand" + "os" "path/filepath" "strconv" + "sync" + "time" + "go.opencensus.io/trace" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" v1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1" "github.com/knative/build/pkg/credentials" "github.com/knative/build/pkg/credentials/dockercreds" "github.com/knative/build/pkg/credentials/gitcreds" "github.com/knative/pkg/apis" duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" + "golang.org/x/oauth2/google" ) const workspaceDir = "/workspace" @@ -267,8 +280,9 @@ func MakePod(build *v1alpha1.Build, kubeclient kubernetes.Interface) (*corev1.Po var sources []v1alpha1.SourceSpec // if source is present convert into sources - // NOTES(aaron-prindle) adds custom steps outside of user Steps for git, logs, etc - podContainers := []corev1.Container{*cred} + initContainers := []corev1.Container{*cred} + podContainers := []corev1.Container{} + if source := build.Spec.Source; source != nil { sources = []v1alpha1.SourceSpec{*source} } @@ -284,26 +298,37 @@ func MakePod(build *v1alpha1.Build, kubeclient kubernetes.Interface) (*corev1.Po if err != nil { return nil, err } - podContainers = append(podContainers, *git) + initContainers = append(initContainers, *git) case source.GCS != nil: gcs, err := gcsToContainer(source, i) if err != nil { return nil, err } - podContainers = append(podContainers, *gcs) + initContainers = append(initContainers, *gcs) case source.Custom != nil: cust, err := customToContainer(source.Custom, source.Name) if err != nil { return nil, err } // Prepend the custom container to the steps, to be augmented later with env, volume mounts, etc. + build.Spec.Steps = append([]corev1.Container{*cust}, build.Spec.Steps...) } // webhook validation checks that only one source has subPath defined workspaceSubPath = source.SubPath } - // NOTES(aaron-prindle) setup volume mounts for steps + // init container that copies entrypoint binary into shared volume + // to be used by all other containers w/ entrypoint rewriting + initContainers = append(initContainers, + corev1.Container{ + Name: InitContainerName, + Image: DefaultEntrypointImage, + Command: []string{"/bin/cp"}, + Args: []string{"/entrypoint", BinaryLocation}, + VolumeMounts: []corev1.VolumeMount{toolsMount}, + }) + for i, step := range build.Spec.Steps { step.Env = append(implicitEnvVars, step.Env...) // TODO(mattmoor): Check that volumeMounts match volumes. @@ -341,6 +366,7 @@ func MakePod(build *v1alpha1.Build, kubeclient kubernetes.Interface) (*corev1.Po // declared user volumes. volumes := append(build.Spec.Volumes, implicitVolumes...) volumes = append(volumes, secrets...) + volumes = append(volumes, toolsVolume) if err := v1alpha1.ValidateVolumes(volumes); err != nil { return nil, err } @@ -351,19 +377,23 @@ func MakePod(build *v1alpha1.Build, kubeclient kubernetes.Interface) (*corev1.Po return nil, err } gibberish := hex.EncodeToString(b) - // entrypoint.RedirectSteps(podContainers) - RedirectSteps(podContainers) + + // Generate a unique name based on the build's name. 
+ // Add a unique suffix to avoid confusion when a build + // is deleted and re-created with the same name. + // We don't use GenerateName here because k8s fakes don't support it. + name := fmt.Sprintf("%s-pod-%s", build.Name, gibberish) + + if err := RedirectSteps(podContainers, kubeclient, build); err != nil { + return nil, fmt.Errorf("unable to rewrite entrypoint for %q: %s", name, err) + } return &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ // We execute the build's pod in the same namespace as where the build was // created so that it can access colocated resources. Namespace: build.Namespace, - // Generate a unique name based on the build's name. - // Add a unique suffix to avoid confusion when a build - // is deleted and re-created with the same name. - // We don't use GenerateName here because k8s fakes don't support it. - Name: fmt.Sprintf("%s-pod-%s", build.Name, gibberish), + Name: name, // If our parent Build is deleted, then we should be as well. OwnerReferences: []metav1.OwnerReference{ *metav1.NewControllerRef(build, schema.GroupVersionKind{ @@ -380,6 +410,7 @@ func MakePod(build *v1alpha1.Build, kubeclient kubernetes.Interface) (*corev1.Po Spec: corev1.PodSpec{ // If the build fails, don't restart it. RestartPolicy: corev1.RestartPolicyNever, + InitContainers: initContainers, Containers: podContainers, ServiceAccountName: build.Spec.ServiceAccountName, Volumes: volumes, @@ -400,14 +431,14 @@ func BuildStatusFromPod(p *corev1.Pod, buildSpec v1alpha1.BuildSpec) v1alpha1.Bu StartTime: &p.CreationTimestamp, } - // Always ignore the first pod status, which is creds-init. + // Always ignore the first pod status, which is entrypoint cp (creds-init) skip := 1 if buildSpec.Source != nil { // If the build specifies source, skip another container status, which // is the source-fetching container. skip++ } - // Also skip multiple sourcees specified by the build. + // Also skip multiple sources specified by the build. skip += len(buildSpec.Sources) if skip <= len(p.Status.InitContainerStatuses) { for _, s := range p.Status.InitContainerStatuses[skip:] { @@ -504,8 +535,15 @@ const ( BinaryLocation = MountPoint + "/entrypoint" JSONConfigEnvVar = "ENTRYPOINT_OPTIONS" InitContainerName = "place-tools" - ProcessLogFile = "/tools/process-log.txt" - MarkerFile = "/tools/marker-file.txt" + // TODO(aaron-prindle) change this to wherever is sensible + DefaultEntrypointImage = "gcr.io/aprindle-vm-test/entrypoint:latest" + + ProcessLogFile = "/tools/process-log.txt" + MarkerFile = "/tools/marker-file.txt" + ShouldWaitForPrevStep = false + PreRunFile = "0" + ShouldRunPostRun = true + PostRunFile = "0" ) var toolsMount = corev1.VolumeMount{ @@ -513,23 +551,309 @@ var toolsMount = corev1.VolumeMount{ MountPath: MountPoint, } +var toolsVolume = corev1.Volume{ + Name: MountName, + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, +} + type entrypointArgs struct { Args []string `json:"args"` ProcessLog string `json:"process_log"` MarkerFile string `json:"marker_file"` + + ShouldWaitForPrevStep bool `json:"shouldWaitForPrevStep"` + PreRunFile string `json:"preRunFile"` + ShouldRunPostRun bool `json:"shouldRunPostRun"` + PostRunFile string `json:"postRunFile"` +} + +// Cache is a simple caching mechanism allowing for caching the results of +// getting the Entrypoint of a container image from a remote registry. It +// is synchronized via a mutex so that we can share a single Cache across +// each worker thread that the reconciler is running. 
The mutex is necessary
+// due to the possibility of a panic if two workers were to attempt to read and
+// write to the internal map at the same time.
+type Cache struct {
+	mtx   sync.RWMutex
+	cache map[string][]string
+}
+
+// NewCache is a simple helper function that returns a pointer to a Cache that
+// has had the internal cache map initialized.
+func NewCache() *Cache {
+	return &Cache{
+		cache: make(map[string][]string),
+	}
+}
+
+func (c *Cache) get(sha string) ([]string, bool) {
+	c.mtx.RLock()
+	ep, ok := c.cache[sha]
+	c.mtx.RUnlock()
+	return ep, ok
+}
+
+func (c *Cache) set(sha string, ep []string) {
+	c.mtx.Lock()
+	c.cache[sha] = ep
+	c.mtx.Unlock()
+}
+
+type AuthToken struct {
+	AccessToken string
+	Endpoint    string
+}
+
+type dockerJSON struct {
+	Auths map[string]registryAuth `json:"auths,omitempty"`
+}
+
+type registryAuth struct {
+	Auth  string `json:"auth"`
+	Email string `json:"email"`
+}
+
+func getGCRAuthorizationKey() ([]AuthToken, error) {
+	ts, err := google.DefaultTokenSource(context.TODO(), "https://www.googleapis.com/auth/cloud-platform")
+	if err != nil {
+		return []AuthToken{}, err
+	}
+
+	token, err := ts.Token()
+	if err != nil {
+		return []AuthToken{}, err
+	}
+
+	if !token.Valid() {
+		return []AuthToken{}, fmt.Errorf("token was invalid")
+	}
+
+	if token.Type() != "Bearer" {
+		return []AuthToken{}, fmt.Errorf("expected token type %q but got %q", "Bearer", token.Type())
+	}
+
+	return []AuthToken{
+		{
+			AccessToken: token.AccessToken,
+			Endpoint:    "https://us.gcr.io"}, // TODO(aaron-prindle) make this work for all regions
+	}, nil
+}
+
+const (
+	interval = 1 * time.Second
+	timeout  = 5 * time.Minute
+)
+
+// WaitForServiceAccount polls for the ServiceAccount called name in namespace
+// every interval until it exists, an error occurs, or the timeout is reached.
+// desc is used to name the metric that is emitted to track how long the wait
+// took.
+func WaitForServiceAccount(kubeclient kubernetes.Interface, name string, namespace string, desc string) error {
+	metricName := fmt.Sprintf("WaitForServiceAccountState/%s/%s", name, desc)
+	_, span := trace.StartSpan(context.Background(), metricName)
+	defer span.End()
+
+	return wait.PollImmediate(interval, timeout, func() (bool, error) {
+		_, err := kubeclient.CoreV1().ServiceAccounts(namespace).Get(name, metav1.GetOptions{})
+		if err != nil {
+			return false, nil // TODO(aaron-prindle) better err msg?
+		}
+
+		return true, nil
+	})
+}
+
+// WaitForSecret polls for the Secret called name in namespace every interval
+// until it exists, an error occurs, or the timeout is reached. desc is used
+// to name the metric that is emitted to track how long the wait took.
+func WaitForSecret(kubeclient kubernetes.Interface, name string, namespace string, desc string) error {
+	metricName := fmt.Sprintf("WaitForSecretState/%s/%s", name, desc)
+	_, span := trace.StartSpan(context.Background(), metricName)
+	defer span.End()
+
+	return wait.PollImmediate(interval, timeout, func() (bool, error) {
+		_, err := kubeclient.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{})
+		if err != nil {
+			return false, nil // TODO(aaron-prindle) better err msg?
+		}
+
+		return true, nil
+	})
+}
+
+// TODO(aaron-prindle) i don't think this method will work...
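+// (The concern: the code below writes $HOME/.docker/config.json, which is
+// shared mutable state, so concurrent reconciler workers could clobber each
+// other's registry credentials; a per-call DOCKER_CONFIG directory, or
+// passing credentials to go-containerregistry in memory, would avoid the
+// shared file entirely.)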
+// var mutex = &sync.Mutex{} + +// GetRemoteEntrypoint accepts a cache of image lookups, as well as the image +// to look for. If the cache does not contain the image, it will lookup the +// metadata from the images registry, and then commit that to the cache +func GetRemoteEntrypoint(cache *Cache, image string, kubeclient kubernetes.Interface, build *v1alpha1.Build, dockercfgenv string) ([]string, error) { + + // TODO(aaron-prindle) i don't think this method will work... + // hold lock + // mutex.Lock() + // defer mutex.Unlock() + + serviceAccountName := build.Spec.ServiceAccountName + // if serviceAccountName == "" { + // serviceAccountName = "default" + // } + if serviceAccountName == "" || serviceAccountName == "default" { + // GKE metadata server authentication + tokens, err := getGCRAuthorizationKey() + if err != nil { + return nil, err + } + + dockerCfgTemplate := `{ "auths": {"%s": { "auth": "%s", "email": "none"} } }` + authTemplate := `oauth2accesstoken:%s` + secretStr := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(authTemplate, tokens[0].AccessToken))) + dockercfg := []byte(fmt.Sprintf(dockerCfgTemplate, tokens[0].Endpoint, secretStr)) + + // use random DOCKER_CONFIG env var + // path := filepath.Join(dockercfgenv, ".docker") + path := filepath.Join(os.Getenv("HOME"), ".docker") + if _, err := os.Stat(path); os.IsNotExist(err) { + os.Mkdir(path, 0644) + } + + // path = filepath.Join(dockercfgenv, ".docker", "config.json") + path = filepath.Join(os.Getenv("HOME"), ".docker", "config.json") + err = ioutil.WriteFile(path, dockercfg, 0644) + if err != nil { + return nil, err + } + } else { + // TODO(aaron-prindle) make sure to try all imagePullSecrets/registries + // TODO(aaron-prindle) see if there is a better way than blocking + WaitForServiceAccount(kubeclient, serviceAccountName, build.Namespace, "desc") + sa, err := kubeclient.CoreV1().ServiceAccounts(build.Namespace).Get(serviceAccountName, metav1.GetOptions{}) + if err != nil { + return nil, err // TODO(aaron-prindle) better err msg? + } + + fmt.Println(sa.ImagePullSecrets) + fmt.Println(len(sa.ImagePullSecrets)) + for _, secret := range sa.ImagePullSecrets { + // TODO(aaron-prindle) see if there is a better way than blocking + WaitForSecret(kubeclient, secret.Name, build.Namespace, "desc") + scrt, err := kubeclient.CoreV1().Secrets(build.Namespace).Get(secret.Name, metav1.GetOptions{}) + if err != nil { + return nil, err // TODO(aaron-prindle) better err msg? 
+ } + + // path := filepath.Join(dockercfgenv, ".docker") + path := filepath.Join(os.Getenv("HOME"), ".docker") + if _, err := os.Stat(path); os.IsNotExist(err) { + os.Mkdir(path, 0644) + } + // TODO(aaron-prindle) see if there is a way to pass the auth to go-containerregisty + // to avoid writing .docker/config.json file + // parallelism might be a concern w/ a file + // path = filepath.Join(dockercfgenv, ".docker", "config.json") + + // TODO(aaron-prindle) support .dockerconfigjson and .dockercfg + if _, ok := scrt.Data[".dockerconfigjson"]; ok { + path = filepath.Join(os.Getenv("HOME"), ".docker", "config.json") + err = ioutil.WriteFile(path, scrt.Data[".dockerconfigjson"], 0644) + if err != nil { + return nil, err + } + } else if _, ok := scrt.Data[".dockercfg"]; ok { + return nil, fmt.Errorf(".dockercfg is currently not supported") + // fmt.Println("==========") + // fmt.Println(scrt.Data[".dockercfg"]) + // fmt.Println("==========") + // convert .dockercfg info to .docker/config.json format + // serialize to json + // grab "auth" field + // base64 decode that + // convert to json (need to drop _json_key thing?) + // grab private_key field + // create new json with .docker/config.json format + + // path = filepath.Join(os.Getenv("HOME"), ".docker", "config.json") + // err = ioutil.WriteFile(path, scrt.Data[".dockerconfigjson"], 0644) + // if err != nil { + // return nil, err + // } + + } else { + // TODO(aaron-prindle) warn/error? that no docker info in secret + } + } + } + + if ep, ok := cache.get(image); ok { + return ep, nil + } + + // verify the image name, then download the remote config file + ref, err := name.ParseReference(image, name.WeakValidation) + if err != nil { + return nil, fmt.Errorf("couldn't parse image %s: %v", image, err) + } + // TODO(aaron-prindle) have retry setup for the various methods + img, err := remote.Image(ref, remote.WithAuthFromKeychain(authn.DefaultKeychain)) + if err != nil { + return nil, fmt.Errorf("couldn't get container image info from registry %s: %v", image, err) + } + cfg, err := img.ConfigFile() + if err != nil { + return nil, fmt.Errorf("couldn't get config for image %s: %v", image, err) + } + cache.set(image, cfg.Config.Entrypoint) + return cfg.Config.Entrypoint, nil +} + +const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + +func RandStringBytes(n int) string { + b := make([]byte, n) + for i := range b { + b[i] = letterBytes[mrand.Intn(len(letterBytes))] + } + return string(b) +} + +// TODO(aaron-prindle) setup the cache properly +var cache = NewCache() + // RedirectSteps will modify each of the steps/containers such that // the binary being run is no longer the one specified by the Command // and the Args, but is instead the entrypoint binary, which will // itself invoke the Command and Args, but also capture logs. 
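+//
+// For example (a hypothetical step, values not from this repo): a step with
+// Command ["/kaniko/executor"] and Args ["--dockerfile=Dockerfile"] is
+// rewritten to Command ["/tools/entrypoint"] with empty Args, and the
+// original invocation travels in the ENTRYPOINT_OPTIONS env var as
+// {"args":["/kaniko/executor","--dockerfile=Dockerfile"],...}.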
-func RedirectSteps(steps []corev1.Container) error { +func RedirectSteps(steps []corev1.Container, kubeclient kubernetes.Interface, build *v1alpha1.Build) error { + // For each step with no entrypoint set, try to populate it with the info + // from the remote registry + dockercfgenv := RandStringBytes(10) + dockercfgenv = "/" + dockercfgenv + // gen random string for i := range steps { step := &steps[i] - e, err := getEnvVar(step.Command, step.Args) + if len(step.Command) == 0 { + ep, err := GetRemoteEntrypoint(cache, step.Image, kubeclient, build, dockercfgenv) + if err != nil { + return fmt.Errorf("could not get entrypoint from registry for %s: %v", step.Image, err) + } + step.Command = ep + } + e, err := getEnvVar(step.Command, step.Args, i) + if i != 0 { + step.Resources = corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + // Must set memory limit to get MemoryStats.AvailableBytes + corev1.ResourceCPU: resource.MustParse("0m"), + }, + } + } if err != nil { return fmt.Errorf("couldn't get env var for entrypoint: %s", err) } + step.Command = []string{BinaryLocation} step.Args = []string{} @@ -542,16 +866,25 @@ func RedirectSteps(steps []corev1.Container) error { return nil } -func getEnvVar(cmd, args []string) (string, error) { +func getEnvVar(cmd, args []string, stepNumber int) (string, error) { + shouldWaitForPrevStep := ShouldWaitForPrevStep + // TODO(aaron-prindle) modify ShouldRunPostRun to not run on last step + if stepNumber != 0 { + shouldWaitForPrevStep = true + } + entrypointArgs := entrypointArgs{ - Args: append(cmd, args...), - ProcessLog: ProcessLogFile, - MarkerFile: MarkerFile, - // TODO(aaron-prindle) add the new options here + Args: append(cmd, args...), + ProcessLog: ProcessLogFile, + MarkerFile: MarkerFile, + ShouldWaitForPrevStep: shouldWaitForPrevStep, + PreRunFile: filepath.Join(MountPoint, strconv.Itoa(stepNumber)), + ShouldRunPostRun: ShouldRunPostRun, + PostRunFile: filepath.Join(MountPoint, strconv.Itoa(stepNumber+1)), } j, err := json.Marshal(entrypointArgs) if err != nil { - return "", fmt.Errorf("couldn't marshal arguments %q for entrypoint env var: %s", entrypointArgs, err) + return "", fmt.Errorf("couldn't marshal arguments %v for entrypoint env var: %s", entrypointArgs, err) } return string(j), nil } diff --git a/pkg/reconciler/build/resources/pod_test.go b/pkg/reconciler/build/resources/pod_test.go index 1dc54b6c..8b38b47f 100644 --- a/pkg/reconciler/build/resources/pod_test.go +++ b/pkg/reconciler/build/resources/pod_test.go @@ -18,6 +18,7 @@ package resources import ( "crypto/rand" + "fmt" "strings" "testing" @@ -38,12 +39,16 @@ var ( ignorePrivateResourceFields = cmpopts.IgnoreUnexported(resource.Quantity{}) ignoreVolatileTime = cmp.Comparer(func(_, _ apis.VolatileTime) bool { return true }) ignoreVolatileTimePtr = cmp.Comparer(func(_, _ *apis.VolatileTime) bool { return true }) - nopContainer = corev1.Container{ - Name: "nop", - Image: *nopImage, - } ) +var entrypointContainer = corev1.Container{ + Name: InitContainerName, + Image: DefaultEntrypointImage, + Command: []string{"/bin/cp"}, + Args: []string{"/entrypoint", BinaryLocation}, + VolumeMounts: []corev1.VolumeMount{toolsMount}, +} + func TestMakePod(t *testing.T) { subPath := "subpath" implicitVolumeMountsWithSubPath := []corev1.VolumeMount{} @@ -82,7 +87,7 @@ func TestMakePod(t *testing.T) { b: v1alpha1.BuildSpec{ Steps: []corev1.Container{{ Name: "name", - Image: "image", + Image: "gcr.io/kaniko-project/executor", }}, }, bAnnotations: map[string]string{ @@ -97,15 +102,15 @@ func 
TestMakePod(t *testing.T) { Env: implicitEnvVars, VolumeMounts: implicitVolumeMounts, WorkingDir: workspaceDir, - }, { + }}, + Containers: []corev1.Container{{ Name: "build-step-name", - Image: "image", + Image: "gcr.io/kaniko-project/executor", Env: implicitEnvVars, VolumeMounts: implicitVolumeMounts, WorkingDir: workspaceDir, }}, - Containers: []corev1.Container{nopContainer}, - Volumes: implicitVolumes, + Volumes: implicitVolumes, }, }, { desc: "source", @@ -118,7 +123,7 @@ func TestMakePod(t *testing.T) { }, Steps: []corev1.Container{{ Name: "name", - Image: "image", + Image: "gcr.io/kaniko-project/executor", }}, }, want: &corev1.PodSpec{ @@ -137,15 +142,15 @@ func TestMakePod(t *testing.T) { Env: implicitEnvVars, VolumeMounts: implicitVolumeMounts, WorkingDir: workspaceDir, - }, { + }}, + Containers: []corev1.Container{{ Name: "build-step-name", - Image: "image", + Image: "gcr.io/kaniko-project/executor", Env: implicitEnvVars, VolumeMounts: implicitVolumeMounts, WorkingDir: workspaceDir, }}, - Containers: []corev1.Container{nopContainer}, - Volumes: implicitVolumes, + Volumes: implicitVolumes, }, }, { desc: "sources", @@ -165,7 +170,7 @@ func TestMakePod(t *testing.T) { }}, Steps: []corev1.Container{{ Name: "name", - Image: "image", + Image: "gcr.io/kaniko-project/executor", }}, }, want: &corev1.PodSpec{ @@ -191,15 +196,15 @@ func TestMakePod(t *testing.T) { Env: implicitEnvVars, VolumeMounts: implicitVolumeMounts, WorkingDir: workspaceDir, - }, { + }}, + Containers: []corev1.Container{{ Name: "build-step-name", - Image: "image", + Image: "gcr.io/kaniko-project/executor", Env: implicitEnvVars, VolumeMounts: implicitVolumeMounts, WorkingDir: workspaceDir, }}, - Containers: []corev1.Container{nopContainer}, - Volumes: implicitVolumes, + Volumes: implicitVolumes, }, }, { desc: "git-source-with-subpath", @@ -213,7 +218,7 @@ func TestMakePod(t *testing.T) { }, Steps: []corev1.Container{{ Name: "name", - Image: "image", + Image: "gcr.io/kaniko-project/executor", }}, }, want: &corev1.PodSpec{ @@ -232,15 +237,15 @@ func TestMakePod(t *testing.T) { Env: implicitEnvVars, VolumeMounts: implicitVolumeMounts, // without subpath WorkingDir: workspaceDir, - }, { + }}, + Containers: []corev1.Container{{ Name: "build-step-name", - Image: "image", + Image: "gcr.io/kaniko-project/executor", Env: implicitEnvVars, VolumeMounts: implicitVolumeMountsWithSubPath, WorkingDir: workspaceDir, }}, - Containers: []corev1.Container{nopContainer}, - Volumes: implicitVolumes, + Volumes: implicitVolumes, }, }, { desc: "git-sources-with-subpath", @@ -262,7 +267,7 @@ func TestMakePod(t *testing.T) { }}, Steps: []corev1.Container{{ Name: "name", - Image: "image", + Image: "gcr.io/kaniko-project/executor", }}, }, want: &corev1.PodSpec{ @@ -288,15 +293,15 @@ func TestMakePod(t *testing.T) { Env: implicitEnvVars, VolumeMounts: implicitVolumeMounts, // without subpath WorkingDir: workspaceDir, - }, { + }}, + Containers: []corev1.Container{{ Name: "build-step-name", - Image: "image", + Image: "gcr.io/kaniko-project/executor", Env: implicitEnvVars, VolumeMounts: implicitVolumeMountsWithSubPath, WorkingDir: workspaceDir, }}, - Containers: []corev1.Container{nopContainer}, - Volumes: implicitVolumes, + Volumes: implicitVolumes, }, }, { desc: "gcs-source-with-subpath", @@ -310,7 +315,7 @@ func TestMakePod(t *testing.T) { }, Steps: []corev1.Container{{ Name: "name", - Image: "image", + Image: "gcr.io/kaniko-project/executor", }}, }, want: &corev1.PodSpec{ @@ -329,121 +334,124 @@ func TestMakePod(t *testing.T) { Env: 
implicitEnvVars, VolumeMounts: implicitVolumeMounts, // without subpath WorkingDir: workspaceDir, - }, { + }}, + Containers: []corev1.Container{{ Name: "build-step-name", - Image: "image", + Image: "gcr.io/kaniko-project/executor", Env: implicitEnvVars, VolumeMounts: implicitVolumeMountsWithSubPath, WorkingDir: workspaceDir, }}, - Containers: []corev1.Container{nopContainer}, - Volumes: implicitVolumes, + Volumes: implicitVolumes, }, - }, { - desc: "gcs-source-with-targetPath", - b: v1alpha1.BuildSpec{ - Source: &v1alpha1.SourceSpec{ - GCS: &v1alpha1.GCSSourceSpec{ - Type: v1alpha1.GCSManifest, - Location: "gs://foo/bar", + }, + { + desc: "gcs-source-with-targetPath", + b: v1alpha1.BuildSpec{ + Source: &v1alpha1.SourceSpec{ + GCS: &v1alpha1.GCSSourceSpec{ + Type: v1alpha1.GCSManifest, + Location: "gs://foo/bar", + }, + TargetPath: "path/foo", }, - TargetPath: "path/foo", }, - }, - want: &corev1.PodSpec{ - RestartPolicy: corev1.RestartPolicyNever, - InitContainers: []corev1.Container{{ - Name: initContainerPrefix + credsInit, - Image: *credsImage, - Args: []string{}, - Env: implicitEnvVars, - VolumeMounts: implicitVolumeMounts, // without subpath - WorkingDir: workspaceDir, - }, { - Name: initContainerPrefix + gcsSource + "-0", - Image: *gcsFetcherImage, - Args: []string{"--type", "Manifest", "--location", "gs://foo/bar", "--dest_dir", "/workspace/path/foo"}, - Env: implicitEnvVars, - VolumeMounts: implicitVolumeMounts, // without subpath - WorkingDir: workspaceDir, - }}, - Containers: []corev1.Container{nopContainer}, - Volumes: implicitVolumes, - }, - }, { - desc: "custom-source-with-subpath", - b: v1alpha1.BuildSpec{ - Source: &v1alpha1.SourceSpec{ - Custom: &corev1.Container{ - Image: "image", - }, - SubPath: subPath, + want: &corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + InitContainers: []corev1.Container{{ + Name: initContainerPrefix + credsInit, + Image: *credsImage, + Args: []string{}, + Env: implicitEnvVars, + VolumeMounts: implicitVolumeMounts, // without subpath + WorkingDir: workspaceDir, + }, { + Name: initContainerPrefix + gcsSource + "-0", + Image: *gcsFetcherImage, + Args: []string{"--type", "Manifest", "--location", "gs://foo/bar", "--dest_dir", "/workspace/path/foo"}, + Env: implicitEnvVars, + VolumeMounts: implicitVolumeMounts, // without subpath + WorkingDir: workspaceDir, + }}, + Containers: []corev1.Container{}, + Volumes: implicitVolumes, }, - Steps: []corev1.Container{{ - Name: "name", - Image: "image", - }}, }, - want: &corev1.PodSpec{ - RestartPolicy: corev1.RestartPolicyNever, - InitContainers: []corev1.Container{{ - Name: initContainerPrefix + credsInit, - Image: *credsImage, - Args: []string{}, - Env: implicitEnvVars, - VolumeMounts: implicitVolumeMounts, // without subpath - WorkingDir: workspaceDir, - }, { - Name: initContainerPrefix + customSource, - Image: "image", - Env: implicitEnvVars, - VolumeMounts: implicitVolumeMountsWithSubPath, // *with* subpath - WorkingDir: workspaceDir, - }, { - Name: "build-step-name", - Image: "image", - Env: implicitEnvVars, - VolumeMounts: implicitVolumeMountsWithSubPath, - WorkingDir: workspaceDir, - }}, - Containers: []corev1.Container{nopContainer}, - Volumes: implicitVolumes, - }, - }, { - desc: "with-service-account", - b: v1alpha1.BuildSpec{ - ServiceAccountName: "service-account", - Steps: []corev1.Container{{ - Name: "name", - Image: "image", - }}, - }, - want: &corev1.PodSpec{ - ServiceAccountName: "service-account", - RestartPolicy: corev1.RestartPolicyNever, - InitContainers: []corev1.Container{{ - 
Name: initContainerPrefix + credsInit, - Image: *credsImage, - Args: []string{ - "-basic-docker=multi-creds=https://docker.io", - "-basic-docker=multi-creds=https://us.gcr.io", - "-basic-git=multi-creds=github.com", - "-basic-git=multi-creds=gitlab.com", + { + desc: "custom-source-with-subpath", + b: v1alpha1.BuildSpec{ + Source: &v1alpha1.SourceSpec{ + Custom: &corev1.Container{ + Image: "gcr.io/kaniko-project/executor", + }, + SubPath: subPath, }, - Env: implicitEnvVars, - VolumeMounts: implicitVolumeMountsWithSecrets, - WorkingDir: workspaceDir, - }, { - Name: "build-step-name", - Image: "image", - Env: implicitEnvVars, - VolumeMounts: implicitVolumeMounts, - WorkingDir: workspaceDir, - }}, - Containers: []corev1.Container{nopContainer}, - Volumes: implicitVolumesWithSecrets, + Steps: []corev1.Container{{ + Name: "name", + Image: "gcr.io/kaniko-project/executor", + }}, + }, + want: &corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + InitContainers: []corev1.Container{{ + Name: initContainerPrefix + credsInit, + Image: *credsImage, + Args: []string{}, + Env: implicitEnvVars, + VolumeMounts: implicitVolumeMounts, // without subpath + WorkingDir: workspaceDir, + }}, + Containers: []corev1.Container{{ + Name: initContainerPrefix + customSource, + Image: "gcr.io/kaniko-project/executor", + Env: implicitEnvVars, + VolumeMounts: implicitVolumeMountsWithSubPath, // *with* subpath + WorkingDir: workspaceDir, + }, { + Name: "build-step-name", + Image: "gcr.io/kaniko-project/executor", + Env: implicitEnvVars, + VolumeMounts: implicitVolumeMountsWithSubPath, + WorkingDir: workspaceDir, + }}, + Volumes: implicitVolumes, + }, }, - }} { + { + desc: "with-service-account", + b: v1alpha1.BuildSpec{ + ServiceAccountName: "service-account", + Steps: []corev1.Container{{ + Name: "name", + Image: "gcr.io/kaniko-project/executor", + }}, + }, + want: &corev1.PodSpec{ + ServiceAccountName: "service-account", + RestartPolicy: corev1.RestartPolicyNever, + InitContainers: []corev1.Container{{ + Name: initContainerPrefix + credsInit, + Image: *credsImage, + Args: []string{ + "-basic-docker=multi-creds=https://docker.io", + "-basic-docker=multi-creds=https://us.gcr.io", + "-basic-git=multi-creds=github.com", + "-basic-git=multi-creds=gitlab.com", + }, + Env: implicitEnvVars, + VolumeMounts: implicitVolumeMountsWithSecrets, + WorkingDir: workspaceDir, + }}, + Containers: []corev1.Container{{ + Name: "build-step-name", + Image: "gcr.io/kaniko-project/executor", + Env: implicitEnvVars, + VolumeMounts: implicitVolumeMounts, + WorkingDir: workspaceDir, + }}, + Volumes: implicitVolumesWithSecrets, + }, + }} { t.Run(c.desc, func(t *testing.T) { cs := fakek8s.NewSimpleClientset( &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "default"}}, @@ -478,6 +486,24 @@ func TestMakePod(t *testing.T) { if err != c.wantErr { t.Fatalf("MakePod: %v", err) } + c.want.InitContainers = append(c.want.InitContainers, entrypointContainer) + c.want.Volumes = append(c.want.Volumes, toolsVolume) + + for i := range c.want.Containers { + c.want.Containers[i].Command = []string{"/tools/entrypoint"} + c.want.Containers[i].VolumeMounts = append( + c.want.Containers[i].VolumeMounts, toolsMount) + shouldWaitForPrevStep := false + if i > 0 { + shouldWaitForPrevStep = true + } + c.want.Containers[i].Env = append( + c.want.Containers[i].Env, corev1.EnvVar{Name: "ENTRYPOINT_OPTIONS", + Value: 
fmt.Sprintf(`{"args":["/kaniko/executor"],"process_log":"/tools/process-log.txt","marker_file":"/tools/marker-file.txt","shouldWaitForPrevStep":%t,"preRunFile":"/tools/%d","shouldRunPostRun":true,"postRunFile":"/tools/%d"}`, + shouldWaitForPrevStep, i, i+1)}, + ) + c.want.Containers[i].Args = []string{} + } // Generated name from hexlifying a stream of 'a's. wantName := "build-name-pod-616161" @@ -485,6 +511,17 @@ func TestMakePod(t *testing.T) { t.Errorf("Pod name got %q, want %q", got.Name, wantName) } + // Mirror MakePod: every step container after the first gets a zeroed resource limit. + for i := range c.want.Containers { + if i != 0 { + c.want.Containers[i].Resources = corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + // CPU limit is zeroed for all but the first step container. + corev1.ResourceCPU: resource.MustParse("0m"), + }, + } + } + } if d := cmp.Diff(&got.Spec, c.want, ignorePrivateResourceFields); d != "" { t.Errorf("Diff spec:\n%s", d) } @@ -771,7 +808,6 @@ func TestBuildStatusFromPod(t *testing.T) { } c.want.Builder = v1alpha1.ClusterBuildProvider c.want.StartTime = &now - if d := cmp.Diff(got, c.want, ignoreVolatileTime); d != "" { t.Errorf("Diff:\n%s", d) } diff --git a/pkg/reconciler/build/validation_test.go b/pkg/reconciler/build/validation_test.go index 39ca9b4e..e59a1971 100644 --- a/pkg/reconciler/build/validation_test.go +++ b/pkg/reconciler/build/validation_test.go @@ -43,7 +43,7 @@ func TestValidateBuild(t *testing.T) { Template: &v1alpha1.TemplateInstantiationSpec{ Arguments: []v1alpha1.ArgumentSpec{{ Name: "foo", - Value: "hello", + Value: "hello-world", }, { Name: "foo", Value: "world", @@ -138,7 +138,7 @@ func TestValidateBuild(t *testing.T) { Name: "template", Arguments: []v1alpha1.ArgumentSpec{{ Name: "foo", - Value: "hello", + Value: "hello-world", }}, }, }, @@ -162,7 +162,7 @@ func TestValidateBuild(t *testing.T) { Name: "template", Arguments: []v1alpha1.ArgumentSpec{{ Name: "bar", - Value: "hello", + Value: "hello-world", }}, }, }, @@ -179,7 +179,7 @@ func TestValidateBuild(t *testing.T) { Name: "template", Arguments: []v1alpha1.ArgumentSpec{{ Name: "foo", - Value: "hello", + Value: "hello-world", }}, }, }, @@ -238,7 +238,7 @@ func TestValidateBuild(t *testing.T) { build: &v1alpha1.Build{ Spec: v1alpha1.BuildSpec{ // ServiceAccountName will default to "default" - Steps: []corev1.Container{{Image: "hello"}}, + Steps: []corev1.Container{{Image: "hello-world"}}, }, }, sa: &corev1.ServiceAccount{ @@ -275,7 +275,7 @@ func TestValidateBuild(t *testing.T) { build: &v1alpha1.Build{ Spec: v1alpha1.BuildSpec{ ServiceAccountName: "serviceaccount", - Steps: []corev1.Container{{Image: "hello"}}, + Steps: []corev1.Container{{Image: "hello-world"}}, }, }, sa: &corev1.ServiceAccount{ diff --git a/test/serviceaccount/secret.yaml b/test/serviceaccount/secret.yaml index f4f9ac49..d75e32e4 100644 --- a/test/serviceaccount/secret.yaml +++ b/test/serviceaccount/secret.yaml @@ -15,11 +15,12 @@ apiVersion: v1 kind: Secret metadata: name: test-readonly-credentials -type: kubernetes.io/dockercfg +type: kubernetes.io/dockerconfigjson data: # Generated by: # kubectl create secret docker-registry regsecret --docker-server=https://gcr.io \ # --docker-username=_json_key --docker-password="$(cat /tmp/key.json)" \ # --docker-email=noreply@google.com --dry-run -o yaml # This service account is JUST a storage reader on gcr.io/build-crd-testing - .dockercfg: 
eyJodHRwczovL2djci5pbyI6eyJ1c2VybmFtZSI6Il9qc29uX2tleSIsInBhc3N3b3JkIjoie1xuICBcInR5cGVcIjogXCJzZXJ2aWNlX2FjY291bnRcIixcbiAgXCJwcm9qZWN0X2lkXCI6IFwiYnVpbGQtY3JkLXRlc3RpbmdcIixcbiAgXCJwcml2YXRlX2tleV9pZFwiOiBcIjA1MDJhNDFhODEyZmI2NGNlNTZhNjhlYzU4MzJhYjBiYTExYzExZTZcIixcbiAgXCJwcml2YXRlX2tleVwiOiBcIi0tLS0tQkVHSU4gUFJJVkFURSBLRVktLS0tLVxcbk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRQzlYNEVZT0FSYnhRTThcXG5EMnhYY2FaVGsrZ1k4ZWp1OTh0THFDUXFUckdNVzlSZVQyeE9ZNUF5Z2FsUFArcDd5WEVja3dCRC9IaE0wZ2xJXFxuN01UTGRlZUtXcityQTFMd0haeVdGVzdIME9uZjd3bllIRUhMV1VtYzNCQ09SRUR0SFJaN1pyUEJmMUhUQUEvM1xcbk1uVzVsWkhTTjlvanpTU0Z3NkFWdTZqNmF4YkJJSUo3NTRMcmdLZUFZdXJ3ZklRMlJMVHUyMDFrMklxTFliaGJcXG4zbVNWRzVSK3RiS3oxQ3ZNNTNuSENiN0NmdVZlV3NyQThrazd4SHJyTFFLTW1JOXYyc2dSdWd5TUF6d3ovNnpOXFxuaDUvaU14eGdlcTVXOHhrVngzSjJuWThKSmRIYWYvVDZBR3NPTkVvNDNweGVpUVZqblJmL0tuMTBUQ2MyRXNJWVxcblM0OVVzWjdCQWdNQkFBRUNnZ0VBQXVwbGR1a0NRUXVENVUvZ2FtSHQ3R2dXM0FNVjE4ZXFuSG5DYTJqbGFoK1NcXG5BZVVHbmhnSmpOdkUrcE1GbFN2NXVmMnAySzRlZC9veEQ2K0NwOVpYRFJqZ3ZmdEl5cWpsemJ3dkZjZ3p3TnVEXFxueWdVa3VwN0hlY0RzRDhUdGVBb2JUL1Zwd3E2ektNckJ3Q3ZOa3Z5NmJWbG9FajV4M2JYc2F4ZTk1RE8veXB1NlxcbncwVzk3enh3d0RKWTZLUWNJV01qaHJHeHZ3WDduaVVDZU00bGVXQkR5R3R3MXplSm40aEVjNk4zYWpRYWNYS2NcXG4rNFFseGNpYW1ZcVFXYlBudHhXUWhoUXpjSFdMaTJsOWNGYlpENyt1SkxGNGlONnk4bVZOVTNLM0sxYlJZclNEXFxuUlVwM2FVVkJYbUZnK1ovMnB1VkwrbVUzajNMTFdZeUJPa2V2dU9tZGdRS0JnUURlM0dJUWt5V0lTMTRUZE1PU1xcbkJpS0JDRHk4aDk2ZWhMMEhrRGJ5T2tTdFBLZEY5cHVFeFp4aHk3b2pIQ0lNNUZWcnBSTjI1cDRzRXp3RmFjK3ZcXG5KSUZnRXZxN21YZm1YaVhJTmllUG9FUWFDbm54RHhXZ21yMEhVS0VtUzlvTWRnTGNHVStrQ1ZHTnN6N0FPdW0wXFxuS3FZM3MyMlE5bFE2N0ZPeXFpdThXRlE3UVFLQmdRRFppRmhURVprUEVjcVpqbndKcFRCNTZaV1A5S1RzbFpQN1xcbndVNGJ6aTZ5K21leWYzTUorNEwyU3lIYzNjcFNNYmp0Tk9aQ3Q0N2I5MDhGVW1MWFVHTmhjd3VaakVReEZleTBcXG5tNDFjUzVlNFA0OWI5bjZ5TEJqQnJCb3FzMldCYWwyZWdkaE5KU3NDV29pWlA4L1pUOGVnWHZoN2I5MWp6b0syXFxucTJQVW1BNERnUUtCZ0FXTDJJanZFSTBPeXgyUzExY24vZTNXSmFUUGdOUFRHOTAzVXBhK3FuemhPSXgrTWFxaFxcblBGNFdzdUF5MEFvZ0dKd2dOSmJOOEh2S1VzRVR2QTV3eXlOMzlYTjd3MGNoYXJGTDM3b3NVK1dPQXpEam5qY3NcXG5BcTVPN0dQR21YdWI2RUJRQlBKaEpQMXd5NHYvSzFmSGcvRjQ3cTRmNDBMQUpPa2FZUkpENUh6QkFvR0JBTlVoXFxubklCUEpxcTRJTXZRNmNDOWc4QisxeFlEZWE5L1lrMXcrU21QR3Z3ckVYeTNHS3g0SzdsS3BiUHo3bTRYMzNzeFxcbnNFVS8rWTJWUW13UmExeFFtLzUzcks3VjJsNUpmL0Q0MDBqUm02WmZTQU92Z0RUcnRablVHSk1yejlFN3VOdzdcXG5sZ1VIM0pyaXZ5Ri9meE1JOHFzelFid1hQMCt4bnlxQXhFQWdkdUtCQW9HQUlNK1BTTllXQ1pYeERwU0hJMThkXFxuaktrb0FidzJNb3l3UUlsa2V1QW4xZFhGYWQxenNYUUdkVHJtWHl2N05QUCs4R1hCa25CTGkzY3Z4VGlsSklTeVxcbnVjTnJDTWlxTkFTbi9kcTdjV0RGVUFCZ2pYMTZKSDJETkZaL2wvVVZGM05EQUpqWENzMVg3eUlKeVhCNm94L3pcXG5hU2xxbElNVjM1REJEN3F4Unl1S3Nnaz1cXG4tLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tXFxuXCIsXG4gIFwiY2xpZW50X2VtYWlsXCI6IFwicHVsbC1zZWNyZXQtdGVzdGluZ0BidWlsZC1jcmQtdGVzdGluZy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbVwiLFxuICBcImNsaWVudF9pZFwiOiBcIjEwNzkzNTg2MjAzMzAyNTI1MTM1MlwiLFxuICBcImF1dGhfdXJpXCI6IFwiaHR0cHM6Ly9hY2NvdW50cy5nb29nbGUuY29tL28vb2F1dGgyL2F1dGhcIixcbiAgXCJ0b2tlbl91cmlcIjogXCJodHRwczovL2FjY291bnRzLmdvb2dsZS5jb20vby9vYXV0aDIvdG9rZW5cIixcbiAgXCJhdXRoX3Byb3ZpZGVyX3g1MDlfY2VydF91cmxcIjogXCJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9vYXV0aDIvdjEvY2VydHNcIixcbiAgXCJjbGllbnRfeDUwOV9jZXJ0X3VybFwiOiBcImh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3JvYm90L3YxL21ldGFkYXRhL3g1MDkvcHVsbC1zZWNyZXQtdGVzdGluZyU0MGJ1aWxkLWNyZC10ZXN0aW5nLmlhbS5nc2VydmljZWFjY291bnQuY29tXCJcbn0iLCJlbWFpbCI6Im5vcmVwbHlAZ29vZ2xlLmNvbSIsImF1dGgiOiJYMnB6YjI1ZmEyVjVPbnNLSUNBaWRIbHdaU0k2SUNKelpYSjJhV05sWDJGalkyOTFiblFpTEFvZ0lDSndjbTlxWldOMFgybGtJam9nSW1KMWFXeGtMV055WkMxMFpYTjBhVzVuSWl3S0lDQWljSEpwZG1GMFpWOXJaWGxmYVdRaU9pQWlNRFV3TW1FME1XRTR
NVEptWWpZMFkyVTFObUUyT0dWak5UZ3pNbUZpTUdKaE1URmpNVEZsTmlJc0NpQWdJbkJ5YVhaaGRHVmZhMlY1SWpvZ0lpMHRMUzB0UWtWSFNVNGdVRkpKVmtGVVJTQkxSVmt0TFMwdExWeHVUVWxKUlhaUlNVSkJSRUZPUW1kcmNXaHJhVWM1ZHpCQ1FWRkZSa0ZCVTBOQ1MyTjNaMmRUYWtGblJVRkJiMGxDUVZGRE9WZzBSVmxQUVZKaWVGRk5PRnh1UkRKNFdHTmhXbFJySzJkWk9HVnFkVGs0ZEV4eFExRnhWSEpIVFZjNVVtVlVNbmhQV1RWQmVXZGhiRkJRSzNBM2VWaEZZMnQzUWtRdlNHaE5NR2RzU1Z4dU4wMVVUR1JsWlV0WGNpdHlRVEZNZDBoYWVWZEdWemRJTUU5dVpqZDNibGxJUlVoTVYxVnRZek5DUTA5U1JVUjBTRkphTjFweVVFSm1NVWhVUVVFdk0xeHVUVzVYTld4YVNGTk9PVzlxZWxOVFJuYzJRVloxTm1vMllYaGlRa2xKU2pjMU5FeHlaMHRsUVZsMWNuZG1TVkV5VWt4VWRUSXdNV3N5U1hGTVdXSm9ZbHh1TTIxVFZrYzFVaXQwWWt0Nk1VTjJUVFV6YmtoRFlqZERablZXWlZkemNrRTRhMnMzZUVoeWNreFJTMDF0U1RsMk1uTm5VblZuZVUxQmVuZDZMelo2VGx4dWFEVXZhVTE0ZUdkbGNUVlhPSGhyVm5nelNqSnVXVGhLU21SSVlXWXZWRFpCUjNOUFRrVnZORE53ZUdWcFVWWnFibEptTDB0dU1UQlVRMk15UlhOSldWeHVVelE1VlhOYU4wSkJaMDFDUVVGRlEyZG5SVUZCZFhCc1pIVnJRMUZSZFVRMVZTOW5ZVzFJZERkSFoxY3pRVTFXTVRobGNXNUlia05oTW1wc1lXZ3JVMXh1UVdWVlIyNW9aMHBxVG5aRkszQk5SbXhUZGpWMVpqSndNa3MwWldRdmIzaEVOaXREY0RsYVdFUlNhbWQyWm5SSmVYRnFiSHBpZDNaR1kyZDZkMDUxUkZ4dWVXZFZhM1Z3TjBobFkwUnpSRGhVZEdWQmIySlVMMVp3ZDNFMmVrdE5ja0ozUTNaT2EzWjVObUpXYkc5RmFqVjRNMkpZYzJGNFpUazFSRTh2ZVhCMU5seHVkekJYT1RkNmVIZDNSRXBaTmt0UlkwbFhUV3BvY2tkNGRuZFlOMjVwVlVObFRUUnNaVmRDUkhsSGRIY3hlbVZLYmpSb1JXTTJUak5oYWxGaFkxaExZMXh1S3pSUmJIaGphV0Z0V1hGUlYySlFiblI0VjFGb2FGRjZZMGhYVEdreWJEbGpSbUphUkRjcmRVcE1SalJwVGpaNU9HMVdUbFV6U3pOTE1XSlNXWEpUUkZ4dVVsVndNMkZWVmtKWWJVWm5LMW92TW5CMVZrd3JiVlV6YWpOTVRGZFplVUpQYTJWMmRVOXRaR2RSUzBKblVVUmxNMGRKVVd0NVYwbFRNVFJVWkUxUFUxeHVRbWxMUWtORWVUaG9PVFpsYUV3d1NHdEVZbmxQYTFOMFVFdGtSamx3ZFVWNFduaG9lVGR2YWtoRFNVMDFSbFp5Y0ZKT01qVndOSE5GZW5kR1lXTXJkbHh1U2tsR1owVjJjVGR0V0dadFdHbFlTVTVwWlZCdlJWRmhRMjV1ZUVSNFYyZHRjakJJVlV0RmJWTTViMDFrWjB4alIxVXJhME5XUjA1emVqZEJUM1Z0TUZ4dVMzRlpNM015TWxFNWJGRTJOMFpQZVhGcGRUaFhSbEUzVVZGTFFtZFJSRnBwUm1oVVJWcHJVRVZqY1ZwcWJuZEtjRlJDTlRaYVYxQTVTMVJ6YkZwUU4xeHVkMVUwWW5wcE5ua3JiV1Y1WmpOTlNpczBUREpUZVVoak0yTndVMDFpYW5ST1QxcERkRFEzWWprd09FWlZiVXhZVlVkT2FHTjNkVnBxUlZGNFJtVjVNRnh1YlRReFkxTTFaVFJRTkRsaU9XNDJlVXhDYWtKeVFtOXhjekpYUW1Gc01tVm5aR2hPU2xOelExZHZhVnBRT0M5YVZEaGxaMWgyYURkaU9URnFlbTlMTWx4dWNUSlFWVzFCTkVSblVVdENaMEZYVERKSmFuWkZTVEJQZVhneVV6RXhZMjR2WlROWFNtRlVVR2RPVUZSSE9UQXpWWEJoSzNGdWVtaFBTWGdyVFdGeGFGeHVVRVkwVjNOMVFYa3dRVzluUjBwM1owNUtZazQ0U0haTFZYTkZWSFpCTlhkNWVVNHpPVmhPTjNjd1kyaGhja1pNTXpkdmMxVXJWMDlCZWtScWJtcGpjMXh1UVhFMVR6ZEhVRWR0V0hWaU5rVkNVVUpRU21oS1VERjNlVFIyTDBzeFpraG5MMFkwTjNFMFpqUXdURUZLVDJ0aFdWSktSRFZJZWtKQmIwZENRVTVWYUZ4dWJrbENVRXB4Y1RSSlRYWlJObU5ET1djNFFpc3hlRmxFWldFNUwxbHJNWGNyVTIxUVIzWjNja1ZZZVROSFMzZzBTemRzUzNCaVVIbzNiVFJZTXpOemVGeHVjMFZWTHl0Wk1sWlJiWGRTWVRGNFVXMHZOVE55U3pkV01tdzFTbVl2UkRRd01HcFNiVFphWmxOQlQzWm5SRlJ5ZEZwdVZVZEtUWEo2T1VVM2RVNTNOMXh1YkdkVlNETktjbWwyZVVZdlpuaE5TVGh4YzNwUlluZFlVREFyZUc1NWNVRjRSVUZuWkhWTFFrRnZSMEZKVFN0UVUwNVpWME5hV0hoRWNGTklTVEU0WkZ4dWFrdHJiMEZpZHpKTmIzbDNVVWxzYTJWMVFXNHhaRmhHWVdReGVuTllVVWRrVkhKdFdIbDJOMDVRVUNzNFIxaENhMjVDVEdrelkzWjRWR2xzU2tsVGVWeHVkV05PY2tOTmFYRk9RVk51TDJSeE4yTlhSRVpWUVVKbmFsZ3hOa3BJTWtST1Jsb3ZiQzlWVmtZelRrUkJTbXBZUTNNeFdEZDVTVXA1V0VJMmIzZ3ZlbHh1WVZOc2NXeEpUVll6TlVSQ1JEZHhlRko1ZFV0eloyczlYRzR0TFMwdExVVk9SQ0JRVWtsV1FWUkZJRXRGV1MwdExTMHRYRzRpTEFvZ0lDSmpiR2xsYm5SZlpXMWhhV3dpT2lBaWNIVnNiQzF6WldOeVpYUXRkR1Z6ZEdsdVowQmlkV2xzWkMxamNtUXRkR1Z6ZEdsdVp5NXBZVzB1WjNObGNuWnBZMlZoWTJOdmRXNTBMbU52YlNJc0NpQWdJbU5zYVdWdWRGOXBaQ0k2SUNJeE1EYzVNelU0TmpJd016TXdNalV5TlRFek5USWlMQW9nSUNKaGRYUm9YM1Z5YVNJNklDSm9kSFJ3Y3pvdkwyRmpZMjkxYm5SekxtZHZiMmRzWlM1amIyMHZieTl2WVhWMGFESXZZWFYwYUNJc0NpQWdJblJ2YTJWdVgzVnlhU0k2SUNKb2RIUndjem92TDJGalkyOTFibl
J6TG1kdmIyZHNaUzVqYjIwdmJ5OXZZWFYwYURJdmRHOXJaVzRpTEFvZ0lDSmhkWFJvWDNCeWIzWnBaR1Z5WDNnMU1EbGZZMlZ5ZEY5MWNtd2lPaUFpYUhSMGNITTZMeTkzZDNjdVoyOXZaMnhsWVhCcGN5NWpiMjB2YjJGMWRHZ3lMM1l4TDJObGNuUnpJaXdLSUNBaVkyeHBaVzUwWDNnMU1EbGZZMlZ5ZEY5MWNtd2lPaUFpYUhSMGNITTZMeTkzZDNjdVoyOXZaMnhsWVhCcGN5NWpiMjB2Y205aWIzUXZkakV2YldWMFlXUmhkR0V2ZURVd09TOXdkV3hzTFhObFkzSmxkQzEwWlhOMGFXNW5KVFF3WW5WcGJHUXRZM0prTFhSbGMzUnBibWN1YVdGdExtZHpaWEoyYVdObFlXTmpiM1Z1ZEM1amIyMGlDbjA9In19 + # .dockercfg: eyJodHRwczovL2djci5pbyI6eyJ1c2VybmFtZSI6Il9qc29uX2tleSIsInBhc3N3b3JkIjoie1xuICBcInR5cGVcIjogXCJzZXJ2aWNlX2FjY291bnRcIixcbiAgXCJwcm9qZWN0X2lkXCI6IFwiYnVpbGQtY3JkLXRlc3RpbmdcIixcbiAgXCJwcml2YXRlX2tleV9pZFwiOiBcIjA1MDJhNDFhODEyZmI2NGNlNTZhNjhlYzU4MzJhYjBiYTExYzExZTZcIixcbiAgXCJwcml2YXRlX2tleVwiOiBcIi0tLS0tQkVHSU4gUFJJVkFURSBLRVktLS0tLVxcbk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRQzlYNEVZT0FSYnhRTThcXG5EMnhYY2FaVGsrZ1k4ZWp1OTh0THFDUXFUckdNVzlSZVQyeE9ZNUF5Z2FsUFArcDd5WEVja3dCRC9IaE0wZ2xJXFxuN01UTGRlZUtXcityQTFMd0haeVdGVzdIME9uZjd3bllIRUhMV1VtYzNCQ09SRUR0SFJaN1pyUEJmMUhUQUEvM1xcbk1uVzVsWkhTTjlvanpTU0Z3NkFWdTZqNmF4YkJJSUo3NTRMcmdLZUFZdXJ3ZklRMlJMVHUyMDFrMklxTFliaGJcXG4zbVNWRzVSK3RiS3oxQ3ZNNTNuSENiN0NmdVZlV3NyQThrazd4SHJyTFFLTW1JOXYyc2dSdWd5TUF6d3ovNnpOXFxuaDUvaU14eGdlcTVXOHhrVngzSjJuWThKSmRIYWYvVDZBR3NPTkVvNDNweGVpUVZqblJmL0tuMTBUQ2MyRXNJWVxcblM0OVVzWjdCQWdNQkFBRUNnZ0VBQXVwbGR1a0NRUXVENVUvZ2FtSHQ3R2dXM0FNVjE4ZXFuSG5DYTJqbGFoK1NcXG5BZVVHbmhnSmpOdkUrcE1GbFN2NXVmMnAySzRlZC9veEQ2K0NwOVpYRFJqZ3ZmdEl5cWpsemJ3dkZjZ3p3TnVEXFxueWdVa3VwN0hlY0RzRDhUdGVBb2JUL1Zwd3E2ektNckJ3Q3ZOa3Z5NmJWbG9FajV4M2JYc2F4ZTk1RE8veXB1NlxcbncwVzk3enh3d0RKWTZLUWNJV01qaHJHeHZ3WDduaVVDZU00bGVXQkR5R3R3MXplSm40aEVjNk4zYWpRYWNYS2NcXG4rNFFseGNpYW1ZcVFXYlBudHhXUWhoUXpjSFdMaTJsOWNGYlpENyt1SkxGNGlONnk4bVZOVTNLM0sxYlJZclNEXFxuUlVwM2FVVkJYbUZnK1ovMnB1VkwrbVUzajNMTFdZeUJPa2V2dU9tZGdRS0JnUURlM0dJUWt5V0lTMTRUZE1PU1xcbkJpS0JDRHk4aDk2ZWhMMEhrRGJ5T2tTdFBLZEY5cHVFeFp4aHk3b2pIQ0lNNUZWcnBSTjI1cDRzRXp3RmFjK3ZcXG5KSUZnRXZxN21YZm1YaVhJTmllUG9FUWFDbm54RHhXZ21yMEhVS0VtUzlvTWRnTGNHVStrQ1ZHTnN6N0FPdW0wXFxuS3FZM3MyMlE5bFE2N0ZPeXFpdThXRlE3UVFLQmdRRFppRmhURVprUEVjcVpqbndKcFRCNTZaV1A5S1RzbFpQN1xcbndVNGJ6aTZ5K21leWYzTUorNEwyU3lIYzNjcFNNYmp0Tk9aQ3Q0N2I5MDhGVW1MWFVHTmhjd3VaakVReEZleTBcXG5tNDFjUzVlNFA0OWI5bjZ5TEJqQnJCb3FzMldCYWwyZWdkaE5KU3NDV29pWlA4L1pUOGVnWHZoN2I5MWp6b0syXFxucTJQVW1BNERnUUtCZ0FXTDJJanZFSTBPeXgyUzExY24vZTNXSmFUUGdOUFRHOTAzVXBhK3FuemhPSXgrTWFxaFxcblBGNFdzdUF5MEFvZ0dKd2dOSmJOOEh2S1VzRVR2QTV3eXlOMzlYTjd3MGNoYXJGTDM3b3NVK1dPQXpEam5qY3NcXG5BcTVPN0dQR21YdWI2RUJRQlBKaEpQMXd5NHYvSzFmSGcvRjQ3cTRmNDBMQUpPa2FZUkpENUh6QkFvR0JBTlVoXFxubklCUEpxcTRJTXZRNmNDOWc4QisxeFlEZWE5L1lrMXcrU21QR3Z3ckVYeTNHS3g0SzdsS3BiUHo3bTRYMzNzeFxcbnNFVS8rWTJWUW13UmExeFFtLzUzcks3VjJsNUpmL0Q0MDBqUm02WmZTQU92Z0RUcnRablVHSk1yejlFN3VOdzdcXG5sZ1VIM0pyaXZ5Ri9meE1JOHFzelFid1hQMCt4bnlxQXhFQWdkdUtCQW9HQUlNK1BTTllXQ1pYeERwU0hJMThkXFxuaktrb0FidzJNb3l3UUlsa2V1QW4xZFhGYWQxenNYUUdkVHJtWHl2N05QUCs4R1hCa25CTGkzY3Z4VGlsSklTeVxcbnVjTnJDTWlxTkFTbi9kcTdjV0RGVUFCZ2pYMTZKSDJETkZaL2wvVVZGM05EQUpqWENzMVg3eUlKeVhCNm94L3pcXG5hU2xxbElNVjM1REJEN3F4Unl1S3Nnaz1cXG4tLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tXFxuXCIsXG4gIFwiY2xpZW50X2VtYWlsXCI6IFwicHVsbC1zZWNyZXQtdGVzdGluZ0BidWlsZC1jcmQtdGVzdGluZy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbVwiLFxuICBcImNsaWVudF9pZFwiOiBcIjEwNzkzNTg2MjAzMzAyNTI1MTM1MlwiLFxuICBcImF1dGhfdXJpXCI6IFwiaHR0cHM6Ly9hY2NvdW50cy5nb29nbGUuY29tL28vb2F1dGgyL2F1dGhcIixcbiAgXCJ0b2tlbl91cmlcIjogXCJodHRwczovL2FjY291bnRzLmdvb2dsZS5jb20vby9vYXV0aDIvdG9rZW5cIixcbiAgXCJhdXRoX3Byb3ZpZGVyX3g1MDlfY2VydF91cmxcIjogXCJodHRwczovL3d3dy5nb29nbGVhcGlzLmNv
bS9vYXV0aDIvdjEvY2VydHNcIixcbiAgXCJjbGllbnRfeDUwOV9jZXJ0X3VybFwiOiBcImh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3JvYm90L3YxL21ldGFkYXRhL3g1MDkvcHVsbC1zZWNyZXQtdGVzdGluZyU0MGJ1aWxkLWNyZC10ZXN0aW5nLmlhbS5nc2VydmljZWFjY291bnQuY29tXCJcbn0iLCJlbWFpbCI6Im5vcmVwbHlAZ29vZ2xlLmNvbSIsImF1dGgiOiJYMnB6YjI1ZmEyVjVPbnNLSUNBaWRIbHdaU0k2SUNKelpYSjJhV05sWDJGalkyOTFiblFpTEFvZ0lDSndjbTlxWldOMFgybGtJam9nSW1KMWFXeGtMV055WkMxMFpYTjBhVzVuSWl3S0lDQWljSEpwZG1GMFpWOXJaWGxmYVdRaU9pQWlNRFV3TW1FME1XRTRNVEptWWpZMFkyVTFObUUyT0dWak5UZ3pNbUZpTUdKaE1URmpNVEZsTmlJc0NpQWdJbkJ5YVhaaGRHVmZhMlY1SWpvZ0lpMHRMUzB0UWtWSFNVNGdVRkpKVmtGVVJTQkxSVmt0TFMwdExWeHVUVWxKUlhaUlNVSkJSRUZPUW1kcmNXaHJhVWM1ZHpCQ1FWRkZSa0ZCVTBOQ1MyTjNaMmRUYWtGblJVRkJiMGxDUVZGRE9WZzBSVmxQUVZKaWVGRk5PRnh1UkRKNFdHTmhXbFJySzJkWk9HVnFkVGs0ZEV4eFExRnhWSEpIVFZjNVVtVlVNbmhQV1RWQmVXZGhiRkJRSzNBM2VWaEZZMnQzUWtRdlNHaE5NR2RzU1Z4dU4wMVVUR1JsWlV0WGNpdHlRVEZNZDBoYWVWZEdWemRJTUU5dVpqZDNibGxJUlVoTVYxVnRZek5DUTA5U1JVUjBTRkphTjFweVVFSm1NVWhVUVVFdk0xeHVUVzVYTld4YVNGTk9PVzlxZWxOVFJuYzJRVloxTm1vMllYaGlRa2xKU2pjMU5FeHlaMHRsUVZsMWNuZG1TVkV5VWt4VWRUSXdNV3N5U1hGTVdXSm9ZbHh1TTIxVFZrYzFVaXQwWWt0Nk1VTjJUVFV6YmtoRFlqZERablZXWlZkemNrRTRhMnMzZUVoeWNreFJTMDF0U1RsMk1uTm5VblZuZVUxQmVuZDZMelo2VGx4dWFEVXZhVTE0ZUdkbGNUVlhPSGhyVm5nelNqSnVXVGhLU21SSVlXWXZWRFpCUjNOUFRrVnZORE53ZUdWcFVWWnFibEptTDB0dU1UQlVRMk15UlhOSldWeHVVelE1VlhOYU4wSkJaMDFDUVVGRlEyZG5SVUZCZFhCc1pIVnJRMUZSZFVRMVZTOW5ZVzFJZERkSFoxY3pRVTFXTVRobGNXNUlia05oTW1wc1lXZ3JVMXh1UVdWVlIyNW9aMHBxVG5aRkszQk5SbXhUZGpWMVpqSndNa3MwWldRdmIzaEVOaXREY0RsYVdFUlNhbWQyWm5SSmVYRnFiSHBpZDNaR1kyZDZkMDUxUkZ4dWVXZFZhM1Z3TjBobFkwUnpSRGhVZEdWQmIySlVMMVp3ZDNFMmVrdE5ja0ozUTNaT2EzWjVObUpXYkc5RmFqVjRNMkpZYzJGNFpUazFSRTh2ZVhCMU5seHVkekJYT1RkNmVIZDNSRXBaTmt0UlkwbFhUV3BvY2tkNGRuZFlOMjVwVlVObFRUUnNaVmRDUkhsSGRIY3hlbVZLYmpSb1JXTTJUak5oYWxGaFkxaExZMXh1S3pSUmJIaGphV0Z0V1hGUlYySlFiblI0VjFGb2FGRjZZMGhYVEdreWJEbGpSbUphUkRjcmRVcE1SalJwVGpaNU9HMVdUbFV6U3pOTE1XSlNXWEpUUkZ4dVVsVndNMkZWVmtKWWJVWm5LMW92TW5CMVZrd3JiVlV6YWpOTVRGZFplVUpQYTJWMmRVOXRaR2RSUzBKblVVUmxNMGRKVVd0NVYwbFRNVFJVWkUxUFUxeHVRbWxMUWtORWVUaG9PVFpsYUV3d1NHdEVZbmxQYTFOMFVFdGtSamx3ZFVWNFduaG9lVGR2YWtoRFNVMDFSbFp5Y0ZKT01qVndOSE5GZW5kR1lXTXJkbHh1U2tsR1owVjJjVGR0V0dadFdHbFlTVTVwWlZCdlJWRmhRMjV1ZUVSNFYyZHRjakJJVlV0RmJWTTViMDFrWjB4alIxVXJhME5XUjA1emVqZEJUM1Z0TUZ4dVMzRlpNM015TWxFNWJGRTJOMFpQZVhGcGRUaFhSbEUzVVZGTFFtZFJSRnBwUm1oVVJWcHJVRVZqY1ZwcWJuZEtjRlJDTlRaYVYxQTVTMVJ6YkZwUU4xeHVkMVUwWW5wcE5ua3JiV1Y1WmpOTlNpczBUREpUZVVoak0yTndVMDFpYW5ST1QxcERkRFEzWWprd09FWlZiVXhZVlVkT2FHTjNkVnBxUlZGNFJtVjVNRnh1YlRReFkxTTFaVFJRTkRsaU9XNDJlVXhDYWtKeVFtOXhjekpYUW1Gc01tVm5aR2hPU2xOelExZHZhVnBRT0M5YVZEaGxaMWgyYURkaU9URnFlbTlMTWx4dWNUSlFWVzFCTkVSblVVdENaMEZYVERKSmFuWkZTVEJQZVhneVV6RXhZMjR2WlROWFNtRlVVR2RPVUZSSE9UQXpWWEJoSzNGdWVtaFBTWGdyVFdGeGFGeHVVRVkwVjNOMVFYa3dRVzluUjBwM1owNUtZazQ0U0haTFZYTkZWSFpCTlhkNWVVNHpPVmhPTjNjd1kyaGhja1pNTXpkdmMxVXJWMDlCZWtScWJtcGpjMXh1UVhFMVR6ZEhVRWR0V0hWaU5rVkNVVUpRU21oS1VERjNlVFIyTDBzeFpraG5MMFkwTjNFMFpqUXdURUZLVDJ0aFdWSktSRFZJZWtKQmIwZENRVTVWYUZ4dWJrbENVRXB4Y1RSSlRYWlJObU5ET1djNFFpc3hlRmxFWldFNUwxbHJNWGNyVTIxUVIzWjNja1ZZZVROSFMzZzBTemRzUzNCaVVIbzNiVFJZTXpOemVGeHVjMFZWTHl0Wk1sWlJiWGRTWVRGNFVXMHZOVE55U3pkV01tdzFTbVl2UkRRd01HcFNiVFphWmxOQlQzWm5SRlJ5ZEZwdVZVZEtUWEo2T1VVM2RVNTNOMXh1YkdkVlNETktjbWwyZVVZdlpuaE5TVGh4YzNwUlluZFlVREFyZUc1NWNVRjRSVUZuWkhWTFFrRnZSMEZKVFN0UVUwNVpWME5hV0hoRWNGTklTVEU0WkZ4dWFrdHJiMEZpZHpKTmIzbDNVVWxzYTJWMVFXNHhaRmhHWVdReGVuTllVVWRrVkhKdFdIbDJOMDVRVUNzNFIxaENhMjVDVEdrelkzWjRWR2xzU2tsVGVWeHVkV05PY2tOTmFYRk9RVk51TDJSeE4yTlhSRVpWUVVKbmFsZ3hOa3BJTWtST1Jsb3ZiQzlWVmtZelRrUkJTbXBZUTNNeFdEZDVTVXA1V0VJMmIzZ3ZlbHh1WVZOc2N
XeEpUVll6TlVSQ1JEZHhlRko1ZFV0eloyczlYRzR0TFMwdExVVk9SQ0JRVWtsV1FWUkZJRXRGV1MwdExTMHRYRzRpTEFvZ0lDSmpiR2xsYm5SZlpXMWhhV3dpT2lBaWNIVnNiQzF6WldOeVpYUXRkR1Z6ZEdsdVowQmlkV2xzWkMxamNtUXRkR1Z6ZEdsdVp5NXBZVzB1WjNObGNuWnBZMlZoWTJOdmRXNTBMbU52YlNJc0NpQWdJbU5zYVdWdWRGOXBaQ0k2SUNJeE1EYzVNelU0TmpJd016TXdNalV5TlRFek5USWlMQW9nSUNKaGRYUm9YM1Z5YVNJNklDSm9kSFJ3Y3pvdkwyRmpZMjkxYm5SekxtZHZiMmRzWlM1amIyMHZieTl2WVhWMGFESXZZWFYwYUNJc0NpQWdJblJ2YTJWdVgzVnlhU0k2SUNKb2RIUndjem92TDJGalkyOTFiblJ6TG1kdmIyZHNaUzVqYjIwdmJ5OXZZWFYwYURJdmRHOXJaVzRpTEFvZ0lDSmhkWFJvWDNCeWIzWnBaR1Z5WDNnMU1EbGZZMlZ5ZEY5MWNtd2lPaUFpYUhSMGNITTZMeTkzZDNjdVoyOXZaMnhsWVhCcGN5NWpiMjB2YjJGMWRHZ3lMM1l4TDJObGNuUnpJaXdLSUNBaVkyeHBaVzUwWDNnMU1EbGZZMlZ5ZEY5MWNtd2lPaUFpYUhSMGNITTZMeTkzZDNjdVoyOXZaMnhsWVhCcGN5NWpiMjB2Y205aWIzUXZkakV2YldWMFlXUmhkR0V2ZURVd09TOXdkV3hzTFhObFkzSmxkQzEwWlhOMGFXNW5KVFF3WW5WcGJHUXRZM0prTFhSbGMzUnBibWN1YVdGdExtZHpaWEoyYVdObFlXTmpiM1Z1ZEM1amIyMGlDbjA9In19 + .dockerconfigjson: eyAiYXV0aHMiOiB7ICJnY3IuaW8iOiB7ICJhdXRoIjogIlgycHpiMjVmYTJWNU9uc2dJQ0owZVhCbElqb2dJbk5sY25acFkyVmZZV05qYjNWdWRDSXNJQ0FpY0hKdmFtVmpkRjlwWkNJNklDSmlkV2xzWkMxamNtUXRkR1Z6ZEdsdVp5SXNJQ0FpY0hKcGRtRjBaVjlyWlhsZmFXUWlPaUFpTURVd01tRTBNV0U0TVRKbVlqWTBZMlUxTm1FMk9HVmpOVGd6TW1GaU1HSmhNVEZqTVRGbE5pSXNJQ0FpY0hKcGRtRjBaVjlyWlhraU9pQWlMUzB0TFMxQ1JVZEpUaUJRVWtsV1FWUkZJRXRGV1MwdExTMHRYRzVOU1VsRmRsRkpRa0ZFUVU1Q1oydHhhR3RwUnpsM01FSkJVVVZHUVVGVFEwSkxZM2RuWjFOcVFXZEZRVUZ2U1VKQlVVTTVXRFJGV1U5QlVtSjRVVTA0WEc1RU1uaFlZMkZhVkdzcloxazRaV3AxT1RoMFRIRkRVWEZVY2tkTlZ6bFNaVlF5ZUU5Wk5VRjVaMkZzVUZBcmNEZDVXRVZqYTNkQ1JDOUlhRTB3WjJ4SlhHNDNUVlJNWkdWbFMxZHlLM0pCTVV4M1NGcDVWMFpYTjBnd1QyNW1OM2R1V1VoRlNFeFhWVzFqTTBKRFQxSkZSSFJJVWxvM1duSlFRbVl4U0ZSQlFTOHpYRzVOYmxjMWJGcElVMDQ1YjJwNlUxTkdkelpCVm5VMmFqWmhlR0pDU1VsS056VTBUSEpuUzJWQldYVnlkMlpKVVRKU1RGUjFNakF4YXpKSmNVeFpZbWhpWEc0emJWTldSelZTSzNSaVMzb3hRM1pOTlROdVNFTmlOME5tZFZabFYzTnlRVGhyYXpkNFNISnlURkZMVFcxSk9YWXljMmRTZFdkNVRVRjZkM292Tm5wT1hHNW9OUzlwVFhoNFoyVnhOVmM0ZUd0V2VETktNbTVaT0VwS1pFaGhaaTlVTmtGSGMwOU9SVzgwTTNCNFpXbFJWbXB1VW1ZdlMyNHhNRlJEWXpKRmMwbFpYRzVUTkRsVmMxbzNRa0ZuVFVKQlFVVkRaMmRGUVVGMWNHeGtkV3REVVZGMVJEVlZMMmRoYlVoME4wZG5Wek5CVFZZeE9HVnhia2h1UTJFeWFteGhhQ3RUWEc1QlpWVkhibWhuU21wT2RrVXJjRTFHYkZOMk5YVm1NbkF5U3pSbFpDOXZlRVEySzBOd09WcFlSRkpxWjNabWRFbDVjV3BzZW1KM2RrWmpaM3AzVG5WRVhHNTVaMVZyZFhBM1NHVmpSSE5FT0ZSMFpVRnZZbFF2Vm5CM2NUWjZTMDF5UW5kRGRrNXJkbmsyWWxac2IwVnFOWGd6WWxoellYaGxPVFZFVHk5NWNIVTJYRzUzTUZjNU4zcDRkM2RFU2xrMlMxRmpTVmROYW1oeVIzaDJkMWczYm1sVlEyVk5OR3hsVjBKRWVVZDBkekY2WlVwdU5HaEZZelpPTTJGcVVXRmpXRXRqWEc0ck5GRnNlR05wWVcxWmNWRlhZbEJ1ZEhoWFVXaG9VWHBqU0ZkTWFUSnNPV05HWWxwRU55dDFTa3hHTkdsT05uazRiVlpPVlROTE0wc3hZbEpaY2xORVhHNVNWWEF6WVZWV1FsaHRSbWNyV2k4eWNIVldUQ3R0VlROcU0weE1WMWw1UWs5clpYWjFUMjFrWjFGTFFtZFJSR1V6UjBsUmEzbFhTVk14TkZSa1RVOVRYRzVDYVV0Q1EwUjVPR2c1Tm1Wb1REQklhMFJpZVU5clUzUlFTMlJHT1hCMVJYaGFlR2g1TjI5cVNFTkpUVFZHVm5Kd1VrNHlOWEEwYzBWNmQwWmhZeXQyWEc1S1NVWm5SWFp4TjIxWVptMVlhVmhKVG1sbFVHOUZVV0ZEYm01NFJIaFhaMjF5TUVoVlMwVnRVemx2VFdSblRHTkhWU3RyUTFaSFRuTjZOMEZQZFcwd1hHNUxjVmt6Y3pJeVVUbHNVVFkzUms5NWNXbDFPRmRHVVRkUlVVdENaMUZFV21sR2FGUkZXbXRRUldOeFdtcHVkMHB3VkVJMU5scFhVRGxMVkhOc1dsQTNYRzUzVlRSaWVtazJlU3R0WlhsbU0wMUtLelJNTWxONVNHTXpZM0JUVFdKcWRFNVBXa04wTkRkaU9UQTRSbFZ0VEZoVlIwNW9ZM2QxV21wRlVYaEdaWGt3WEc1dE5ERmpVelZsTkZBME9XSTVialo1VEVKcVFuSkNiM0Z6TWxkQ1lXd3laV2RrYUU1S1UzTkRWMjlwV2xBNEwxcFVPR1ZuV0hab04ySTVNV3A2YjBzeVhHNXhNbEJWYlVFMFJHZFJTMEpuUVZkTU1rbHFka1ZKTUU5NWVESlRNVEZqYmk5bE0xZEtZVlJRWjA1UVZFYzVNRE5WY0dFcmNXNTZhRTlKZUN0TllYRm9YRzVRUmpSWGMzVkJlVEJCYjJkSFNuZG5Ua3BpVGpoSWRrdFZjMFZVZGtFMWQzbDVUak01V0U0M2R6QmphR0Z5Umt3ek4yOXpWU3RYVDBGNlJHcHVhbU56WEc1QmNU
VlBOMGRRUjIxWWRXSTJSVUpSUWxCS2FFcFFNWGQ1TkhZdlN6Rm1TR2N2UmpRM2NUUm1OREJNUVVwUGEyRlpVa3BFTlVoNlFrRnZSMEpCVGxWb1hHNXVTVUpRU25GeE5FbE5kbEUyWTBNNVp6aENLekY0V1VSbFlUa3ZXV3N4ZHl0VGJWQkhkbmR5UlZoNU0wZExlRFJMTjJ4TGNHSlFlamR0TkZnek0zTjRYRzV6UlZVdksxa3lWbEZ0ZDFKaE1YaFJiUzgxTTNKTE4xWXliRFZLWmk5RU5EQXdhbEp0TmxwbVUwRlBkbWRFVkhKMFdtNVZSMHBOY25vNVJUZDFUbmMzWEc1c1oxVklNMHB5YVhaNVJpOW1lRTFKT0hGemVsRmlkMWhRTUN0NGJubHhRWGhGUVdka2RVdENRVzlIUVVsTksxQlRUbGxYUTFwWWVFUndVMGhKTVRoa1hHNXFTMnR2UVdKM01rMXZlWGRSU1d4clpYVkJiakZrV0VaaFpERjZjMWhSUjJSVWNtMVllWFkzVGxCUUt6aEhXRUpyYmtKTWFUTmpkbmhVYVd4S1NWTjVYRzUxWTA1eVEwMXBjVTVCVTI0dlpIRTNZMWRFUmxWQlFtZHFXREUyU2tneVJFNUdXaTlzTDFWV1JqTk9SRUZLYWxoRGN6RllOM2xKU25sWVFqWnZlQzk2WEc1aFUyeHhiRWxOVmpNMVJFSkVOM0Y0VW5sMVMzTm5hejFjYmkwdExTMHRSVTVFSUZCU1NWWkJWRVVnUzBWWkxTMHRMUzFjYmlJc0lDQWlZMnhwWlc1MFgyVnRZV2xzSWpvZ0luQjFiR3d0YzJWamNtVjBMWFJsYzNScGJtZEFZblZwYkdRdFkzSmtMWFJsYzNScGJtY3VhV0Z0TG1kelpYSjJhV05sWVdOamIzVnVkQzVqYjIwaUxDQWdJbU5zYVdWdWRGOXBaQ0k2SUNJeE1EYzVNelU0TmpJd016TXdNalV5TlRFek5USWlMQ0FnSW1GMWRHaGZkWEpwSWpvZ0ltaDBkSEJ6T2k4dllXTmpiM1Z1ZEhNdVoyOXZaMnhsTG1OdmJTOXZMMjloZFhSb01pOWhkWFJvSWl3Z0lDSjBiMnRsYmw5MWNta2lPaUFpYUhSMGNITTZMeTloWTJOdmRXNTBjeTVuYjI5bmJHVXVZMjl0TDI4dmIyRjFkR2d5TDNSdmEyVnVJaXdnSUNKaGRYUm9YM0J5YjNacFpHVnlYM2cxTURsZlkyVnlkRjkxY213aU9pQWlhSFIwY0hNNkx5OTNkM2N1WjI5dloyeGxZWEJwY3k1amIyMHZiMkYxZEdneUwzWXhMMk5sY25Seklpd2dJQ0pqYkdsbGJuUmZlRFV3T1Y5alpYSjBYM1Z5YkNJNklDSm9kSFJ3Y3pvdkwzZDNkeTVuYjI5bmJHVmhjR2x6TG1OdmJTOXliMkp2ZEM5Mk1TOXRaWFJoWkdGMFlTOTROVEE1TDNCMWJHd3RjMlZqY21WMExYUmxjM1JwYm1jbE5EQmlkV2xzWkMxamNtUXRkR1Z6ZEdsdVp5NXBZVzB1WjNObGNuWnBZMlZoWTJOdmRXNTBMbU52YlNKOSIgfSB9LCAiSHR0cEhlYWRlcnMiOiB7ICJVc2VyLUFnZW50IjogIkRvY2tlci1DbGllbnQvMTguMDYuMS1jZSAobGludXgpIiB9fQ== diff --git a/third_party/VENDOR-LICENSE b/third_party/VENDOR-LICENSE index 1edeee5d..9eb2b68c 100644 --- a/third_party/VENDOR-LICENSE +++ b/third_party/VENDOR-LICENSE @@ -1057,6 +1057,213 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +=========================================================== +Import: github.com/knative/build/vendor/github.com/google/go-containerregistry + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + =========================================================== Import: github.com/knative/build/vendor/github.com/google/gofuzz @@ -3215,6 +3422,33 @@ THE SOFTWARE. +=========================================================== +Import: github.com/knative/build/vendor/github.com/sirupsen/logrus + +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + + =========================================================== Import: github.com/knative/build/vendor/github.com/spf13/pflag @@ -4625,3 +4859,211 @@ Import: github.com/knative/build/vendor/k8s.io/client-go See the License for the specific language governing permissions and limitations under the License. + + +=========================================================== +Import: github.com/knative/build/vendor/k8s.io/test-infra + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 The Kubernetes Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/google/go-containerregistry/LICENSE b/vendor/github.com/google/go-containerregistry/LICENSE new file mode 100644 index 00000000..7a4a3ea2 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/google/go-containerregistry/cmd/ko/test/kodata/kenobi b/vendor/github.com/google/go-containerregistry/cmd/ko/test/kodata/kenobi new file mode 120000 index 00000000..5d7eddc7 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/cmd/ko/test/kodata/kenobi @@ -0,0 +1 @@ +../kenobi \ No newline at end of file diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/anon.go b/vendor/github.com/google/go-containerregistry/pkg/authn/anon.go new file mode 100644 index 00000000..c9c08ec7 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/authn/anon.go @@ -0,0 +1,26 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package authn + +// anonymous implements Authenticator for anonymous authentication. +type anonymous struct{} + +// Authorization implements Authenticator. +func (a *anonymous) Authorization() (string, error) { + return "", nil +} + +// Anonymous is a singleton Authenticator for providing anonymous auth. +var Anonymous Authenticator = &anonymous{} diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/auth.go b/vendor/github.com/google/go-containerregistry/pkg/authn/auth.go new file mode 100644 index 00000000..c39ee5a9 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/authn/auth.go @@ -0,0 +1,29 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package authn + +import ( + "fmt" +) + +// auth implements Authenticator for an "auth" entry of the docker config. +type auth struct { + token string +} + +// Authorization implements Authenticator. +func (a *auth) Authorization() (string, error) { + return fmt.Sprintf("Basic %s", a.token), nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/authn.go b/vendor/github.com/google/go-containerregistry/pkg/authn/authn.go new file mode 100644 index 00000000..30e935cb --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/authn/authn.go @@ -0,0 +1,21 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package authn + +// Authenticator is used to authenticate Docker transports. +type Authenticator interface { + // Authorization returns the value to use in an http transport's Authorization header. + Authorization() (string, error) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/basic.go b/vendor/github.com/google/go-containerregistry/pkg/authn/basic.go new file mode 100644 index 00000000..7cd49840 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/authn/basic.go @@ -0,0 +1,33 @@ +// Copyright 2018 Google LLC All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package authn + +import ( + "encoding/base64" + "fmt" +) + +// Basic implements Authenticator for basic authentication. +type Basic struct { + Username string + Password string +} + +// Authorization implements Authenticator. +func (b *Basic) Authorization() (string, error) { + delimited := fmt.Sprintf("%s:%s", b.Username, b.Password) + encoded := base64.StdEncoding.EncodeToString([]byte(delimited)) + return fmt.Sprintf("Basic %s", encoded), nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/bearer.go b/vendor/github.com/google/go-containerregistry/pkg/authn/bearer.go new file mode 100644 index 00000000..cb1ae584 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/authn/bearer.go @@ -0,0 +1,29 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package authn + +import ( + "fmt" +) + +// Bearer implements Authenticator for bearer authentication. +type Bearer struct { + Token string `json:"token"` +} + +// Authorization implements Authenticator. +func (b *Bearer) Authorization() (string, error) { + return fmt.Sprintf("Bearer %s", b.Token), nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/doc.go b/vendor/github.com/google/go-containerregistry/pkg/authn/doc.go new file mode 100644 index 00000000..c2a5fc02 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/authn/doc.go @@ -0,0 +1,17 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package authn defines different methods of authentication for +// talking to a container registry. 
+package authn diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/helper.go b/vendor/github.com/google/go-containerregistry/pkg/authn/helper.go new file mode 100644 index 00000000..4a8ec240 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/authn/helper.go @@ -0,0 +1,96 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package authn + +import ( + "bytes" + "encoding/json" + "fmt" + "os/exec" + "strings" + + "github.com/google/go-containerregistry/pkg/name" +) + +// magicNotFoundMessage is the string that the CLI special cases to mean +// that a given registry domain wasn't found. +const ( + magicNotFoundMessage = "credentials not found in native keychain" +) + +// runner allows us to swap out how we "Run" os/exec commands. +type runner interface { + Run(*exec.Cmd) error +} + +// defaultRunner implements runner by just calling Run(). +type defaultRunner struct{} + +// Run implements runner. +func (dr *defaultRunner) Run(cmd *exec.Cmd) error { + return cmd.Run() +} + +// helper executes the named credential helper against the given domain. +type helper struct { + name string + domain name.Registry + + // We add this layer of indirection to facilitate unit testing. + r runner +} + +// helperOutput is the expected JSON output form of a credential helper +// (or at least these are the fields that we care about). +type helperOutput struct { + Username string + Secret string +} + +// Authorization implements Authenticator. +func (h *helper) Authorization() (string, error) { + helperName := fmt.Sprintf("docker-credential-%s", h.name) + // We want to execute: + // echo -n {domain} | docker-credential-{name} get + cmd := exec.Command(helperName, "get") + + // Some keychains expect a scheme: + // https://github.com/bazelbuild/rules_docker/issues/111 + cmd.Stdin = strings.NewReader(fmt.Sprintf("https://%v", h.domain)) + + var out bytes.Buffer + cmd.Stdout = &out + err := h.r.Run(cmd) + + // If we see this specific message, it means the domain wasn't found + // and we should fall back on anonymous auth. + output := strings.TrimSpace(out.String()) + if output == magicNotFoundMessage { + return Anonymous.Authorization() + } + + if err != nil { + return "", err + } + + // Any other output should be parsed as JSON and the Username / Secret + // fields used for Basic authentication. + ho := helperOutput{} + if err := json.Unmarshal([]byte(output), &ho); err != nil { + return "", err + } + b := Basic{Username: ho.Username, Password: ho.Secret} + return b.Authorization() +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go b/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go new file mode 100644 index 00000000..aee1fedb --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go @@ -0,0 +1,152 @@ +// Copyright 2018 Google LLC All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package authn + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "log" + "os" + "path/filepath" + "runtime" + + "github.com/google/go-containerregistry/pkg/name" +) + +// Keychain is an interface for resolving an image reference to a credential. +type Keychain interface { + // Resolve looks up the most appropriate credential for the specified registry. + Resolve(name.Registry) (Authenticator, error) +} + +// defaultKeychain implements Keychain with the semantics of the standard Docker +// credential keychain. +type defaultKeychain struct{} + +// configDir returns the directory containing Docker's config.json +func configDir() (string, error) { + if dc := os.Getenv("DOCKER_CONFIG"); dc != "" { + return dc, nil + } + if h := dockerUserHomeDir(); h != "" { + return filepath.Join(dockerUserHomeDir(), ".docker"), nil + } + return "", errNoHomeDir +} + +var errNoHomeDir = errors.New("could not determine home directory") + +// dockerUserHomeDir returns the current user's home directory, as interpreted by Docker. +func dockerUserHomeDir() string { + if runtime.GOOS == "windows" { + // Docker specifically expands "%USERPROFILE%" on Windows, + return os.Getenv("USERPROFILE") + } + // Docker defaults to "$HOME" Linux and OSX. + return os.Getenv("HOME") +} + +// authEntry is a helper for JSON parsing an "auth" entry of config.json +// This is not meant for direct consumption. +type authEntry struct { + Auth string `json:"auth"` + Username string `json:"username"` + Password string `json:"password"` +} + +// cfg is a helper for JSON parsing Docker's config.json +// This is not meant for direct consumption. +type cfg struct { + CredHelper map[string]string `json:"credHelpers,omitempty"` + CredStore string `json:"credsStore,omitempty"` + Auths map[string]authEntry `json:"auths,omitempty"` +} + +// There are a variety of ways a domain may get qualified within the Docker credential file. +// We enumerate them here as format strings. +var ( + domainForms = []string{ + // Allow naked domains + "%s", + // Allow scheme-prefixed. + "https://%s", + "http://%s", + // Allow scheme-prefixes with version in url path. + "https://%s/v1/", + "http://%s/v1/", + "https://%s/v2/", + "http://%s/v2/", + } + + // Export an instance of the default keychain. + DefaultKeychain Keychain = &defaultKeychain{} +) + +// Resolve implements Keychain. +func (dk *defaultKeychain) Resolve(reg name.Registry) (Authenticator, error) { + dir, err := configDir() + if err != nil { + log.Printf("Unable to determine config dir: %v", err) + return Anonymous, nil + } + file := filepath.Join(dir, "config.json") + content, err := ioutil.ReadFile(file) + if err != nil { + log.Printf("Unable to read %q: %v", file, err) + return Anonymous, nil + } + + var cf cfg + if err := json.Unmarshal(content, &cf); err != nil { + log.Printf("Unable to parse %q: %v", file, err) + return Anonymous, nil + } + + // Per-registry credential helpers take precedence. 
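+	// For example (illustrative registry/helper values), a config.json containing
+	//   {"credHelpers": {"gcr.io": "gcr"}}
+	// resolves gcr.io credentials by invoking docker-credential-gcr;
+	// any registry/helper pair works the same way.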
+ if cf.CredHelper != nil { + for _, form := range domainForms { + if entry, ok := cf.CredHelper[fmt.Sprintf(form, reg.Name())]; ok { + return &helper{name: entry, domain: reg, r: &defaultRunner{}}, nil + } + } + } + + // A global credential helper is next in precedence. + if cf.CredStore != "" { + return &helper{name: cf.CredStore, domain: reg, r: &defaultRunner{}}, nil + } + + // Lastly, the 'auths' section directly contains basic auth entries. + if cf.Auths != nil { + for _, form := range domainForms { + if entry, ok := cf.Auths[fmt.Sprintf(form, reg.Name())]; ok { + if entry.Auth != "" { + return &auth{entry.Auth}, nil + } else if entry.Username != "" { + return &Basic{Username: entry.Username, Password: entry.Password}, nil + } else { + // TODO(mattmoor): Support identitytoken + // TODO(mattmoor): Support registrytoken + return nil, fmt.Errorf("Unsupported entry in \"auths\" section of %q", file) + } + } + } + } + + // Fallback on anonymous. + return Anonymous, nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/multikeychain.go b/vendor/github.com/google/go-containerregistry/pkg/authn/multikeychain.go new file mode 100644 index 00000000..9d7fb314 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/authn/multikeychain.go @@ -0,0 +1,45 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package authn + +import ( + "github.com/google/go-containerregistry/pkg/name" +) + +type multiKeychain struct { + keychains []Keychain +} + +// Assert that our multi-keychain implements Keychain. +var _ (Keychain) = (*multiKeychain)(nil) + +// NewMultiKeychain composes a list of keychains into one new keychain. +func NewMultiKeychain(kcs ...Keychain) Keychain { + return &multiKeychain{keychains: kcs} +} + +// Resolve implements Keychain. +func (mk *multiKeychain) Resolve(reg name.Registry) (Authenticator, error) { + for _, kc := range mk.keychains { + auth, err := kc.Resolve(reg) + if err != nil { + return nil, err + } + if auth != Anonymous { + return auth, nil + } + } + return Anonymous, nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/check.go b/vendor/github.com/google/go-containerregistry/pkg/name/check.go new file mode 100644 index 00000000..01a25d55 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/name/check.go @@ -0,0 +1,52 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package name
+
+import (
+	"strings"
+	"unicode/utf8"
+)
+
+// Strictness defines the level of strictness for name validation.
+type Strictness int
+
+// Enums for the level of strictness applied during validation.
+const (
+	StrictValidation Strictness = iota
+	WeakValidation
+)
+
+// stripRunesFn returns a function which returns -1 (i.e. a value which
+// signals deletion in strings.Map) for runes in 'runes', and the rune otherwise.
+func stripRunesFn(runes string) func(rune) rune {
+	return func(r rune) rune {
+		if strings.ContainsRune(runes, r) {
+			return -1
+		}
+		return r
+	}
+}
+
+// checkElement checks that a given named element matches character and length restrictions.
+// It returns nil if the element adheres to the given restrictions, or an ErrBadName otherwise.
+func checkElement(name, element, allowedRunes string, minRunes, maxRunes int) error {
+	numRunes := utf8.RuneCountInString(element)
+	if (numRunes < minRunes) || (maxRunes < numRunes) {
+		return NewErrBadName("%s must be between %d and %d runes in length: %s", name, minRunes, maxRunes, element)
+	} else if len(strings.Map(stripRunesFn(allowedRunes), element)) != 0 {
+		return NewErrBadName("%s can only contain the runes `%s`: %s", name, allowedRunes, element)
+	}
+	return nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/digest.go b/vendor/github.com/google/go-containerregistry/pkg/name/digest.go
new file mode 100644
index 00000000..ea6287a8
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/digest.go
@@ -0,0 +1,91 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package name defines structured types for representing image references.
+package name
+
+import (
+	"strings"
+)
+
+const (
+	// These have the form: sha256:<hex string>
+	// TODO(dekkagaijin): replace with opencontainers/go-digest or docker/distribution's validation.
+	digestChars = "sh:0123456789abcdef"
+	digestDelim = "@"
+)
+
+// Digest stores a digest name in a structured form.
+type Digest struct {
+	Repository
+	digest string
+}
+
+// Ensure Digest implements Reference
+var _ Reference = (*Digest)(nil)
+
+// Context implements Reference.
+func (d Digest) Context() Repository {
+	return d.Repository
+}
+
+// Identifier implements Reference.
+func (d Digest) Identifier() string {
+	return d.DigestStr()
+}
+
+// DigestStr returns the digest component of the Digest.
+func (d Digest) DigestStr() string {
+	return d.digest
+}
+
+// Name returns the name from which the Digest was derived.
+func (d Digest) Name() string {
+	return d.Repository.Name() + digestDelim + d.DigestStr()
+}
+
+func (d Digest) String() string {
+	return d.Name()
+}
+
+func checkDigest(name string) error {
+	return checkElement("digest", name, digestChars, 7+64, 7+64)
+}
+
+// NewDigest returns a new Digest representing the given name, according to the given strictness.
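+// For example (illustrative values):
+//
+//	d, err := NewDigest("gcr.io/example/image@sha256:<64 hex chars>", WeakValidation)
+//
+// yields a Digest whose Context().Name() is "gcr.io/example/image" and whose
+// DigestStr() is "sha256:<64 hex chars>".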
+func NewDigest(name string, strict Strictness) (Digest, error) { + // Split on "@" + parts := strings.Split(name, digestDelim) + if len(parts) != 2 { + return Digest{}, NewErrBadName("a digest must contain exactly one '@' separator (e.g. registry/repository@digest) saw: %s", name) + } + base := parts[0] + digest := parts[1] + + // We don't require a digest, but if we get one check it's valid, + // even when not being strict. + // If we are being strict, we want to validate the digest regardless in case + // it's empty. + if digest != "" || strict == StrictValidation { + if err := checkDigest(digest); err != nil { + return Digest{}, err + } + } + + repo, err := NewRepository(base, strict) + if err != nil { + return Digest{}, err + } + return Digest{repo, digest}, nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/errors.go b/vendor/github.com/google/go-containerregistry/pkg/name/errors.go new file mode 100644 index 00000000..7847cc5d --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/name/errors.go @@ -0,0 +1,37 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package name + +import "fmt" + +// ErrBadName is an error for when a bad docker name is supplied. +type ErrBadName struct { + info string +} + +func (e *ErrBadName) Error() string { + return e.info +} + +// NewErrBadName returns a ErrBadName which returns the given formatted string from Error(). +func NewErrBadName(fmtStr string, args ...interface{}) *ErrBadName { + return &ErrBadName{fmt.Sprintf(fmtStr, args...)} +} + +// IsErrBadName returns true if the given error is an ErrBadName. +func IsErrBadName(err error) bool { + _, ok := err.(*ErrBadName) + return ok +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/ref.go b/vendor/github.com/google/go-containerregistry/pkg/name/ref.go new file mode 100644 index 00000000..58775daa --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/name/ref.go @@ -0,0 +1,50 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package name + +import ( + "errors" + "fmt" +) + +// Reference defines the interface that consumers use when they can +// take either a tag or a digest. +type Reference interface { + fmt.Stringer + + // Context accesses the Repository context of the reference. + Context() Repository + + // Identifier accesses the type-specific portion of the reference. 
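+	// For a Tag this is the tag (e.g. "latest"); for a Digest it is the
+	// "sha256:..." digest string.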
+ Identifier() string + + // Name is the fully-qualified reference name. + Name() string + + // Scope is the scope needed to access this reference. + Scope(string) string +} + +// ParseReference parses the string as a reference, either by tag or digest. +func ParseReference(s string, strict Strictness) (Reference, error) { + if t, err := NewTag(s, strict); err == nil { + return t, nil + } + if d, err := NewDigest(s, strict); err == nil { + return d, nil + } + // TODO: Combine above errors into something more useful? + return nil, errors.New("could not parse reference") +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/registry.go b/vendor/github.com/google/go-containerregistry/pkg/name/registry.go new file mode 100644 index 00000000..c2bf5758 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/name/registry.go @@ -0,0 +1,124 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package name + +import ( + "net/url" + "regexp" + "strings" +) + +const ( + DefaultRegistry = "index.docker.io" + defaultRegistryAlias = "docker.io" +) + +// Detect more complex forms of local references. +var reLocal = regexp.MustCompile(`.*\.local(?:host)?(?::\d{1,5})?$`) + +// Detect the loopback IP (127.0.0.1) +var reLoopback = regexp.MustCompile(regexp.QuoteMeta("127.0.0.1")) + +// Detect the loopback IPV6 (::1) +var reipv6Loopback = regexp.MustCompile(regexp.QuoteMeta("::1")) + +// Registry stores a docker registry name in a structured form. +type Registry struct { + insecure bool + registry string +} + +// RegistryStr returns the registry component of the Registry. +func (r Registry) RegistryStr() string { + if r.registry != "" { + return r.registry + } + return DefaultRegistry +} + +// Name returns the name from which the Registry was derived. +func (r Registry) Name() string { + return r.RegistryStr() +} + +func (r Registry) String() string { + return r.Name() +} + +// Scope returns the scope required to access the registry. +func (r Registry) Scope(string) string { + // The only resource under 'registry' is 'catalog'. http://goo.gl/N9cN9Z + return "registry:catalog:*" +} + +// Scheme returns https scheme for all the endpoints except localhost or when explicitly defined. 
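+// For example, "localhost:5000" and "registry.local" resolve to "http",
+// while "gcr.io" resolves to "https".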
+func (r Registry) Scheme() string { + if r.insecure { + return "http" + } + if strings.HasPrefix(r.Name(), "localhost:") { + return "http" + } + if reLocal.MatchString(r.Name()) { + return "http" + } + if reLoopback.MatchString(r.Name()) { + return "http" + } + if reipv6Loopback.MatchString(r.Name()) { + return "http" + } + return "https" +} + +func checkRegistry(name string) error { + // Per RFC 3986, registries (authorities) are required to be prefixed with "//" + // url.Host == hostname[:port] == authority + if url, err := url.Parse("//" + name); err != nil || url.Host != name { + return NewErrBadName("registries must be valid RFC 3986 URI authorities: %s", name) + } + return nil +} + +// NewRegistry returns a Registry based on the given name. +// Strict validation requires explicit, valid RFC 3986 URI authorities to be given. +func NewRegistry(name string, strict Strictness) (Registry, error) { + if strict == StrictValidation && len(name) == 0 { + return Registry{}, NewErrBadName("strict validation requires the registry to be explicitly defined") + } + + if err := checkRegistry(name); err != nil { + return Registry{}, err + } + + // Rewrite "docker.io" to "index.docker.io". + // See: https://github.com/google/go-containerregistry/issues/68 + if name == defaultRegistryAlias { + name = DefaultRegistry + } + + return Registry{registry: name}, nil +} + +// NewInsecureRegistry returns an Insecure Registry based on the given name. +// Strict validation requires explicit, valid RFC 3986 URI authorities to be given. +func NewInsecureRegistry(name string, strict Strictness) (Registry, error) { + reg, err := NewRegistry(name, strict) + if err != nil { + return Registry{}, err + } + reg.insecure = true + return reg, nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/repository.go b/vendor/github.com/google/go-containerregistry/pkg/name/repository.go new file mode 100644 index 00000000..43cc5b82 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/name/repository.go @@ -0,0 +1,99 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package name + +import ( + "fmt" + "strings" +) + +const ( + defaultNamespace = "library" + repositoryChars = "abcdefghijklmnopqrstuvwxyz0123456789_-./" + regRepoDelimiter = "/" +) + +// Repository stores a docker repository name in a structured form. +type Repository struct { + Registry + repository string +} + +// See https://docs.docker.com/docker-hub/official_repos +func hasImplicitNamespace(repo string, reg Registry) bool { + return !strings.ContainsRune(repo, '/') && reg.RegistryStr() == DefaultRegistry +} + +// RepositoryStr returns the repository component of the Repository. +func (r Repository) RepositoryStr() string { + if hasImplicitNamespace(r.repository, r.Registry) { + return fmt.Sprintf("%s/%s", defaultNamespace, r.repository) + } + return r.repository +} + +// Name returns the name from which the Repository was derived. 
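+// For example, NewRepository("ubuntu", WeakValidation).Name() is
+// "index.docker.io/library/ubuntu".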
+func (r Repository) Name() string { + regName := r.Registry.Name() + if regName != "" { + return regName + regRepoDelimiter + r.RepositoryStr() + } + return r.RepositoryStr() +} + +func (r Repository) String() string { + return r.Name() +} + +// Scope returns the scope required to perform the given action on the registry. +// TODO(jonjohnsonjr): consider moving scopes to a separate package. +func (r Repository) Scope(action string) string { + return fmt.Sprintf("repository:%s:%s", r.RepositoryStr(), action) +} + +func checkRepository(repository string) error { + return checkElement("repository", repository, repositoryChars, 2, 255) +} + +// NewRepository returns a new Repository representing the given name, according to the given strictness. +func NewRepository(name string, strict Strictness) (Repository, error) { + if len(name) == 0 { + return Repository{}, NewErrBadName("a repository name must be specified") + } + + var registry string + repo := name + parts := strings.SplitN(name, regRepoDelimiter, 2) + if len(parts) == 2 && (strings.ContainsRune(parts[0], '.') || strings.ContainsRune(parts[0], ':')) { + // The first part of the repository is treated as the registry domain + // iff it contains a '.' or ':' character, otherwise it is all repository + // and the domain defaults to Docker Hub. + registry = parts[0] + repo = parts[1] + } + + if err := checkRepository(repo); err != nil { + return Repository{}, err + } + + reg, err := NewRegistry(registry, strict) + if err != nil { + return Repository{}, err + } + if hasImplicitNamespace(repo, reg) && strict == StrictValidation { + return Repository{}, NewErrBadName("strict validation requires the full repository path (missing 'library')") + } + return Repository{reg, repo}, nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/tag.go b/vendor/github.com/google/go-containerregistry/pkg/name/tag.go new file mode 100644 index 00000000..b8375e1f --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/name/tag.go @@ -0,0 +1,101 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package name + +import ( + "strings" +) + +const ( + defaultTag = "latest" + // TODO(dekkagaijin): use the docker/distribution regexes for validation. + tagChars = "abcdefghijklmnopqrstuvwxyz0123456789_-.ABCDEFGHIJKLMNOPQRSTUVWXYZ" + tagDelim = ":" +) + +// Tag stores a docker tag name in a structured form. +type Tag struct { + Repository + tag string +} + +// Ensure Tag implements Reference +var _ Reference = (*Tag)(nil) + +// Context implements Reference. +func (t Tag) Context() Repository { + return t.Repository +} + +// Identifier implements Reference. +func (t Tag) Identifier() string { + return t.TagStr() +} + +// TagStr returns the tag component of the Tag. +func (t Tag) TagStr() string { + if t.tag != "" { + return t.tag + } + return defaultTag +} + +// Name returns the name from which the Tag was derived. 
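+// For example, NewTag("ubuntu", WeakValidation).Name() is
+// "index.docker.io/library/ubuntu:latest" (the "latest" tag is implied
+// when none is given).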
+func (t Tag) Name() string { + return t.Repository.Name() + tagDelim + t.TagStr() +} + +func (t Tag) String() string { + return t.Name() +} + +// Scope returns the scope required to perform the given action on the tag. +func (t Tag) Scope(action string) string { + return t.Repository.Scope(action) +} + +func checkTag(name string) error { + return checkElement("tag", name, tagChars, 1, 127) +} + +// NewTag returns a new Tag representing the given name, according to the given strictness. +func NewTag(name string, strict Strictness) (Tag, error) { + base := name + tag := "" + + // Split on ":" + parts := strings.Split(name, tagDelim) + // Verify that we aren't confusing a tag for a hostname w/ port for the purposes of weak validation. + if len(parts) > 1 && !strings.Contains(parts[len(parts)-1], regRepoDelimiter) { + base = strings.Join(parts[:len(parts)-1], tagDelim) + tag = parts[len(parts)-1] + } + + // We don't require a tag, but if we get one check it's valid, + // even when not being strict. + // If we are being strict, we want to validate the tag regardless in case + // it's empty. + if tag != "" || strict == StrictValidation { + if err := checkTag(tag); err != nil { + return Tag{}, err + } + } + + repo, err := NewRepository(base, strict) + if err != nil { + return Tag{}, err + } + return Tag{repo, tag}, nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/config.go b/vendor/github.com/google/go-containerregistry/pkg/v1/config.go new file mode 100644 index 00000000..d1d809d9 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/config.go @@ -0,0 +1,130 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "encoding/json" + "io" + "time" +) + +// ConfigFile is the configuration file that holds the metadata describing +// how to launch a container. The names of the fields are chosen to reflect +// the JSON payload of the ConfigFile as defined here: https://git.io/vrAEY +type ConfigFile struct { + Architecture string `json:"architecture"` + Container string `json:"container"` + Created Time `json:"created"` + DockerVersion string `json:"docker_version"` + History []History `json:"history"` + OS string `json:"os"` + RootFS RootFS `json:"rootfs"` + Config Config `json:"config"` + ContainerConfig Config `json:"container_config"` + OSVersion string `json:"osversion"` +} + +// History is one entry of a list recording how this container image was built. +type History struct { + Author string `json:"author"` + Created Time `json:"created"` + CreatedBy string `json:"created_by"` + Comment string `json:"comment"` + EmptyLayer bool `json:"empty_layer,omitempty"` +} + +// Time is a wrapper around time.Time to help with deep copying +type Time struct { + time.Time +} + +// DeepCopyInto creates a deep-copy of the Time value. 
The underlying time.Time +// type is effectively immutable in the time API, so it is safe to +// copy-by-assign, despite the presence of (unexported) Pointer fields. +func (t *Time) DeepCopyInto(out *Time) { + *out = *t +} + +// RootFS holds the ordered list of file system deltas that comprise the +// container image's root filesystem. +type RootFS struct { + Type string `json:"type"` + DiffIDs []Hash `json:"diff_ids"` +} + +// HealthConfig holds configuration settings for the HEALTHCHECK feature. +type HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. + + // Retries is the number of consecutive failures needed to consider a container as unhealthy. + // Zero means inherit. + Retries int `json:",omitempty"` +} + +// Config is a submessage of the config file described as: +// The execution parameters which SHOULD be used as a base when running +// a container using the image. +// The names of the fields in this message are chosen to reflect the JSON +// payload of the Config as defined here: +// https://git.io/vrAET +// and +// https://github.com/opencontainers/image-spec/blob/master/config.md +type Config struct { + AttachStderr bool + AttachStdin bool + AttachStdout bool + Cmd []string + Healthcheck *HealthConfig + Domainname string + Entrypoint []string + Env []string + Hostname string + Image string + Labels map[string]string + OnBuild []string + OpenStdin bool + StdinOnce bool + Tty bool + User string + Volumes map[string]struct{} + WorkingDir string + ExposedPorts map[string]struct{} + ArgsEscaped bool + NetworkDisabled bool + MacAddress string + StopSignal string + Shell []string +} + +// ParseConfigFile parses the io.Reader's contents into a ConfigFile. +func ParseConfigFile(r io.Reader) (*ConfigFile, error) { + cf := ConfigFile{} + if err := json.NewDecoder(r).Decode(&cf); err != nil { + return nil, err + } + return &cf, nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go new file mode 100644 index 00000000..c9b20317 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go @@ -0,0 +1,19 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+// Package v1 defines structured types for OCI v1 images
+// +k8s:deepcopy-gen=package
+
+//go:generate deepcopy-gen -O zz_deepcopy_generated --go-header-file $BOILER_PLATE_FILE -i .
+package v1
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go b/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go
new file mode 100644
index 00000000..f0db0d51
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go
@@ -0,0 +1,111 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+	"crypto/sha256"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"hash"
+	"io"
+	"strconv"
+	"strings"
+)
+
+// Hash is an unqualified digest of some content, e.g. sha256:deadbeef
+type Hash struct {
+	// Algorithm holds the algorithm used to compute the hash.
+	Algorithm string
+
+	// Hex holds the hex portion of the content hash.
+	Hex string
+}
+
+// String reverses NewHash, returning the string form of the hash.
+func (h Hash) String() string {
+	return fmt.Sprintf("%s:%s", h.Algorithm, h.Hex)
+}
+
+// NewHash validates that the input string is a hash and returns a strongly typed Hash object.
+func NewHash(s string) (Hash, error) {
+	h := Hash{}
+	if err := h.parse(s); err != nil {
+		return Hash{}, err
+	}
+	return h, nil
+}
+
+// MarshalJSON implements json.Marshaler
+func (h *Hash) MarshalJSON() ([]byte, error) {
+	return json.Marshal(h.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler
+func (h *Hash) UnmarshalJSON(data []byte) error {
+	s, err := strconv.Unquote(string(data))
+	if err != nil {
+		return err
+	}
+	return h.parse(s)
+}
+
+// Hasher returns a hash.Hash for the named algorithm (e.g. "sha256")
+func Hasher(name string) (hash.Hash, error) {
+	switch name {
+	case "sha256":
+		return sha256.New(), nil
+	default:
+		return nil, fmt.Errorf("unsupported hash: %q", name)
+	}
+}
+
+func (h *Hash) parse(unquoted string) error {
+	parts := strings.Split(unquoted, ":")
+	if len(parts) != 2 {
+		return fmt.Errorf("wrong number of parts in hash: %s", unquoted)
+	}
+
+	rest := strings.TrimLeft(parts[1], "0123456789abcdef")
+	if len(rest) != 0 {
+		return fmt.Errorf("found non-hex character in hash: %c", rest[0])
+	}
+
+	hasher, err := Hasher(parts[0])
+	if err != nil {
+		return err
+	}
+	// Compare the hex to the expected size (2 hex characters per byte)
+	if len(parts[1]) != hasher.Size()*2 {
+		return fmt.Errorf("wrong number of hex digits for %s: %s", parts[0], parts[1])
+	}
+
+	h.Algorithm = parts[0]
+	h.Hex = parts[1]
+	return nil
+}
+
+// SHA256 computes the Hash of the provided io.Reader's content.
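+// It returns the computed hash, the number of bytes consumed from the
+// reader, and any error encountered while reading.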
+func SHA256(r io.Reader) (Hash, int64, error) { + hasher := sha256.New() + n, err := io.Copy(hasher, r) + if err != nil { + return Hash{}, 0, err + } + return Hash{ + Algorithm: "sha256", + Hex: hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size()))), + }, n, nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/image.go new file mode 100644 index 00000000..05568aae --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/image.go @@ -0,0 +1,58 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// Image defines the interface for interacting with an OCI v1 image. +type Image interface { + // Layers returns the ordered collection of filesystem layers that comprise this image. + // The order of the list is oldest/base layer first, and most-recent/top layer last. + Layers() ([]Layer, error) + + // BlobSet returns an unordered collection of all the blobs in the image. + BlobSet() (map[Hash]struct{}, error) + + // MediaType of this image's manifest. + MediaType() (types.MediaType, error) + + // ConfigName returns the hash of the image's config file. + ConfigName() (Hash, error) + + // ConfigFile returns this image's config file. + ConfigFile() (*ConfigFile, error) + + // RawConfigFile returns the serialized bytes of ConfigFile() + RawConfigFile() ([]byte, error) + + // Digest returns the sha256 of this image's manifest. + Digest() (Hash, error) + + // Manifest returns this image's Manifest object. + Manifest() (*Manifest, error) + + // RawManifest returns the serialized bytes of Manifest() + RawManifest() ([]byte, error) + + // LayerByDigest returns a Layer for interacting with a particular layer of + // the image, looking it up by "digest" (the compressed hash). + LayerByDigest(Hash) (Layer, error) + + // LayerByDiffID is an analog to LayerByDigest, looking up by "diff id" + // (the uncompressed hash). + LayerByDiffID(Hash) (Layer, error) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/index.go b/vendor/github.com/google/go-containerregistry/pkg/v1/index.go new file mode 100644 index 00000000..25ba29ed --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/index.go @@ -0,0 +1,33 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v1 + +import ( + "github.com/google/go-containerregistry/pkg/v1/types" +) + +type ImageIndex interface { + // MediaType of this image's manifest. + MediaType() (types.MediaType, error) + + // Digest returns the sha256 of this index's manifest. + Digest() (Hash, error) + + // IndexManifest returns this image index's manifest object. + IndexManifest() (*IndexManifest, error) + + // RawIndexManifest returns the serialized bytes of IndexManifest(). + RawIndexManifest() ([]byte, error) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/layer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/layer.go new file mode 100644 index 00000000..8b5091e4 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/layer.go @@ -0,0 +1,37 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "io" +) + +// Layer is an interface for accessing the properties of a particular layer of a v1.Image +type Layer interface { + // Digest returns the Hash of the compressed layer. + Digest() (Hash, error) + + // DiffID returns the Hash of the uncompressed layer. + DiffID() (Hash, error) + + // Compressed returns an io.ReadCloser for the compressed layer contents. + Compressed() (io.ReadCloser, error) + + // Uncompressed returns an io.ReadCloser for the uncompressed layer contents. + Uncompressed() (io.ReadCloser, error) + + // Size returns the compressed size of the Layer. + Size() (int64, error) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go b/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go new file mode 100644 index 00000000..932ae056 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go @@ -0,0 +1,67 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "encoding/json" + "io" + + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// Manifest represents the OCI image manifest in a structured way. +type Manifest struct { + SchemaVersion int64 `json:"schemaVersion"` + MediaType types.MediaType `json:"mediaType"` + Config Descriptor `json:"config"` + Layers []Descriptor `json:"layers"` + Annotations map[string]string `json:"annotations,omitempty"` +} + +// IndexManifest represents an OCI image index in a structured way. 
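+// An index is the multi-platform counterpart of a Manifest: its Manifests
+// descriptors each point at an image manifest, typically one per platform.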
+type IndexManifest struct {
+	SchemaVersion int64             `json:"schemaVersion"`
+	MediaType     types.MediaType   `json:"mediaType,omitempty"`
+	Manifests     []Descriptor      `json:"manifests"`
+	Annotations   map[string]string `json:"annotations,omitempty"`
+}
+
+// Descriptor holds a reference from the manifest to one of its constituent elements.
+type Descriptor struct {
+	MediaType   types.MediaType   `json:"mediaType"`
+	Size        int64             `json:"size"`
+	Digest      Hash              `json:"digest"`
+	URLs        []string          `json:"urls,omitempty"`
+	Annotations map[string]string `json:"annotations,omitempty"`
+	Platform    *Platform         `json:"platform,omitempty"`
+}
+
+// ParseManifest parses the io.Reader's contents into a Manifest.
+func ParseManifest(r io.Reader) (*Manifest, error) {
+	m := Manifest{}
+	if err := json.NewDecoder(r).Decode(&m); err != nil {
+		return nil, err
+	}
+	return &m, nil
+}
+
+// ParseIndexManifest parses the io.Reader's contents into an IndexManifest.
+func ParseIndexManifest(r io.Reader) (*IndexManifest, error) {
+	im := IndexManifest{}
+	if err := json.NewDecoder(r).Decode(&im); err != nil {
+		return nil, err
+	}
+	return &im, nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go
new file mode 100644
index 00000000..e6e4f4d4
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go
@@ -0,0 +1,159 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package partial
+
+import (
+	"io"
+
+	"github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/v1util"
+)
+
+// CompressedLayer represents the bare minimum interface a natively
+// compressed layer must implement for us to produce a v1.Layer
+type CompressedLayer interface {
+	// Digest returns the Hash of the compressed layer.
+	Digest() (v1.Hash, error)
+
+	// Compressed returns an io.ReadCloser for the compressed layer contents.
+	Compressed() (io.ReadCloser, error)
+
+	// Size returns the compressed size of the Layer.
+	Size() (int64, error)
+}
+
+// compressedLayerExtender implements v1.Layer using the compressed base properties.
+type compressedLayerExtender struct {
+	CompressedLayer
+}
+
+// Uncompressed implements v1.Layer
+func (ule *compressedLayerExtender) Uncompressed() (io.ReadCloser, error) {
+	u, err := ule.Compressed()
+	if err != nil {
+		return nil, err
+	}
+	return v1util.GunzipReadCloser(u)
+}
+
+// DiffID implements v1.Layer
+func (ule *compressedLayerExtender) DiffID() (v1.Hash, error) {
+	// If our nested CompressedLayer implements DiffID,
+	// then delegate to it instead.
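+	// (For example, a layer that already knows its diff id can skip
+	// decompressing and re-hashing the stream below.)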
+	if wdi, ok := ule.CompressedLayer.(WithDiffID); ok {
+		return wdi.DiffID()
+	}
+	r, err := ule.Uncompressed()
+	if err != nil {
+		return v1.Hash{}, err
+	}
+	defer r.Close()
+	h, _, err := v1.SHA256(r)
+	return h, err
+}
+
+// CompressedToLayer fills in the missing methods from a CompressedLayer so that it implements v1.Layer
+func CompressedToLayer(ul CompressedLayer) (v1.Layer, error) {
+	return &compressedLayerExtender{ul}, nil
+}
+
+// CompressedImageCore represents the bare minimum interface a natively
+// compressed image must implement for us to produce a v1.Image.
+type CompressedImageCore interface {
+	imageCore
+
+	// RawManifest returns the serialized bytes of the manifest.
+	RawManifest() ([]byte, error)
+
+	// LayerByDigest is a variation on the v1.Image method, which returns
+	// a CompressedLayer instead.
+	LayerByDigest(v1.Hash) (CompressedLayer, error)
+}
+
+// compressedImageExtender implements v1.Image by extending CompressedImageCore with the
+// appropriate methods computed from the minimal core.
+type compressedImageExtender struct {
+	CompressedImageCore
+}
+
+// Assert that our extender type completes the v1.Image interface
+var _ v1.Image = (*compressedImageExtender)(nil)
+
+// BlobSet implements v1.Image
+func (i *compressedImageExtender) BlobSet() (map[v1.Hash]struct{}, error) {
+	return BlobSet(i)
+}
+
+// Digest implements v1.Image
+func (i *compressedImageExtender) Digest() (v1.Hash, error) {
+	return Digest(i)
+}
+
+// ConfigName implements v1.Image
+func (i *compressedImageExtender) ConfigName() (v1.Hash, error) {
+	return ConfigName(i)
+}
+
+// Layers implements v1.Image
+func (i *compressedImageExtender) Layers() ([]v1.Layer, error) {
+	hs, err := FSLayers(i)
+	if err != nil {
+		return nil, err
+	}
+	ls := make([]v1.Layer, 0, len(hs))
+	for _, h := range hs {
+		l, err := i.LayerByDigest(h)
+		if err != nil {
+			return nil, err
+		}
+		ls = append(ls, l)
+	}
+	return ls, nil
+}
+
+// LayerByDigest implements v1.Image
+func (i *compressedImageExtender) LayerByDigest(h v1.Hash) (v1.Layer, error) {
+	cl, err := i.CompressedImageCore.LayerByDigest(h)
+	if err != nil {
+		return nil, err
+	}
+	return CompressedToLayer(cl)
+}
+
+// LayerByDiffID implements v1.Image
+func (i *compressedImageExtender) LayerByDiffID(h v1.Hash) (v1.Layer, error) {
+	h, err := DiffIDToBlob(i, h)
+	if err != nil {
+		return nil, err
+	}
+	return i.LayerByDigest(h)
+}
+
+// ConfigFile implements v1.Image
+func (i *compressedImageExtender) ConfigFile() (*v1.ConfigFile, error) {
+	return ConfigFile(i)
+}
+
+// Manifest implements v1.Image
+func (i *compressedImageExtender) Manifest() (*v1.Manifest, error) {
+	return Manifest(i)
+}
+
+// CompressedToImage fills in the missing methods from a CompressedImageCore so that it implements v1.Image
+func CompressedToImage(cic CompressedImageCore) (v1.Image, error) {
+	return &compressedImageExtender{
+		CompressedImageCore: cic,
+	}, nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/doc.go
new file mode 100644
index 00000000..153dfe4d
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package partial defines methods for building up a v1.Image from
+// minimal subsets that are sufficient for defining a v1.Image.
+package partial
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/image.go
new file mode 100644
index 00000000..5d6da39d
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/image.go
@@ -0,0 +1,28 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package partial
+
+import (
+	"github.com/google/go-containerregistry/pkg/v1/types"
+)
+
+// imageCore is the core set of properties without which we cannot build a v1.Image
+type imageCore interface {
+	// RawConfigFile returns the serialized bytes of this image's config file.
+	RawConfigFile() ([]byte, error)
+
+	// MediaType of this image's manifest.
+	MediaType() (types.MediaType, error)
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/uncompressed.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/uncompressed.go
new file mode 100644
index 00000000..cedb1997
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/uncompressed.go
@@ -0,0 +1,228 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package partial
+
+import (
+	"bytes"
+	"io"
+	"sync"
+
+	"github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/types"
+	"github.com/google/go-containerregistry/pkg/v1/v1util"
+)
+
+// UncompressedLayer represents the bare minimum interface a natively
+// uncompressed layer must implement for us to produce a v1.Layer
+type UncompressedLayer interface {
+	// DiffID returns the Hash of the uncompressed layer.
+	DiffID() (v1.Hash, error)
+
+	// Uncompressed returns an io.ReadCloser for the uncompressed layer contents.
+	Uncompressed() (io.ReadCloser, error)
+}
+
+// uncompressedLayerExtender implements v1.Layer using the uncompressed base properties.
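+// Digest and Size require gzipping the uncompressed contents, so they are
+// computed lazily and memoized; see calcSizeHash below.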
+type uncompressedLayerExtender struct { + UncompressedLayer + // Memoize size/hash so that the methods aren't twice as + // expensive as doing this manually. + hash v1.Hash + size int64 + hashSizeError error + once sync.Once +} + +// Compressed implements v1.Layer +func (ule *uncompressedLayerExtender) Compressed() (io.ReadCloser, error) { + u, err := ule.Uncompressed() + if err != nil { + return nil, err + } + return v1util.GzipReadCloser(u) +} + +// Digest implements v1.Layer +func (ule *uncompressedLayerExtender) Digest() (v1.Hash, error) { + ule.calcSizeHash() + return ule.hash, ule.hashSizeError +} + +// Size implements v1.Layer +func (ule *uncompressedLayerExtender) Size() (int64, error) { + ule.calcSizeHash() + return ule.size, ule.hashSizeError +} + +func (ule *uncompressedLayerExtender) calcSizeHash() { + ule.once.Do(func() { + var r io.ReadCloser + r, ule.hashSizeError = ule.Compressed() + if ule.hashSizeError != nil { + return + } + defer r.Close() + ule.hash, ule.size, ule.hashSizeError = v1.SHA256(r) + }) +} + +// UncompressedToLayer fills in the missing methods from an UncompressedLayer so that it implements v1.Layer +func UncompressedToLayer(ul UncompressedLayer) (v1.Layer, error) { + return &uncompressedLayerExtender{UncompressedLayer: ul}, nil +} + +// UncompressedImageCore represents the bare minimum interface a natively +// uncompressed image must implement for us to produce a v1.Image +type UncompressedImageCore interface { + imageCore + + // LayerByDiffID is a variation on the v1.Image method, which returns + // an UncompressedLayer instead. + LayerByDiffID(v1.Hash) (UncompressedLayer, error) +} + +// UncompressedToImage fills in the missing methods from an UncompressedImageCore so that it implements v1.Image. +func UncompressedToImage(uic UncompressedImageCore) (v1.Image, error) { + return &uncompressedImageExtender{ + UncompressedImageCore: uic, + }, nil +} + +// uncompressedImageExtender implements v1.Image by extending UncompressedImageCore with the +// appropriate methods computed from the minimal core. 
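+// In particular, a Docker schema 2 manifest is synthesized from the config
+// file and layers on first use and then cached under the lock; see Manifest
+// below.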
+type uncompressedImageExtender struct { + UncompressedImageCore + + lock sync.Mutex + manifest *v1.Manifest +} + +// Assert that our extender type completes the v1.Image interface +var _ v1.Image = (*uncompressedImageExtender)(nil) + +// BlobSet implements v1.Image +func (i *uncompressedImageExtender) BlobSet() (map[v1.Hash]struct{}, error) { + return BlobSet(i) +} + +// Digest implements v1.Image +func (i *uncompressedImageExtender) Digest() (v1.Hash, error) { + return Digest(i) +} + +// Manifest implements v1.Image +func (i *uncompressedImageExtender) Manifest() (*v1.Manifest, error) { + i.lock.Lock() + defer i.lock.Unlock() + if i.manifest != nil { + return i.manifest, nil + } + + b, err := i.RawConfigFile() + if err != nil { + return nil, err + } + + cfgHash, cfgSize, err := v1.SHA256(bytes.NewReader(b)) + if err != nil { + return nil, err + } + + m := &v1.Manifest{ + SchemaVersion: 2, + MediaType: types.DockerManifestSchema2, + Config: v1.Descriptor{ + MediaType: types.DockerConfigJSON, + Size: cfgSize, + Digest: cfgHash, + }, + } + + ls, err := i.Layers() + if err != nil { + return nil, err + } + + m.Layers = make([]v1.Descriptor, len(ls)) + for i, l := range ls { + sz, err := l.Size() + if err != nil { + return nil, err + } + h, err := l.Digest() + if err != nil { + return nil, err + } + + m.Layers[i] = v1.Descriptor{ + MediaType: types.DockerLayer, + Size: sz, + Digest: h, + } + } + + i.manifest = m + return i.manifest, nil +} + +// RawManifest implements v1.Image +func (i *uncompressedImageExtender) RawManifest() ([]byte, error) { + return RawManifest(i) +} + +// ConfigName implements v1.Image +func (i *uncompressedImageExtender) ConfigName() (v1.Hash, error) { + return ConfigName(i) +} + +// ConfigFile implements v1.Image +func (i *uncompressedImageExtender) ConfigFile() (*v1.ConfigFile, error) { + return ConfigFile(i) +} + +// Layers implements v1.Image +func (i *uncompressedImageExtender) Layers() ([]v1.Layer, error) { + diffIDs, err := DiffIDs(i) + if err != nil { + return nil, err + } + ls := make([]v1.Layer, 0, len(diffIDs)) + for _, h := range diffIDs { + l, err := i.LayerByDiffID(h) + if err != nil { + return nil, err + } + ls = append(ls, l) + } + return ls, nil +} + +// LayerByDiffID implements v1.Image +func (i *uncompressedImageExtender) LayerByDiffID(diffID v1.Hash) (v1.Layer, error) { + ul, err := i.UncompressedImageCore.LayerByDiffID(diffID) + if err != nil { + return nil, err + } + return UncompressedToLayer(ul) +} + +// LayerByDigest implements v1.Image +func (i *uncompressedImageExtender) LayerByDigest(h v1.Hash) (v1.Layer, error) { + diffID, err := BlobToDiffID(i, h) + if err != nil { + return nil, err + } + return i.LayerByDiffID(diffID) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go new file mode 100644 index 00000000..bc6fd8e9 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go @@ -0,0 +1,292 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package partial
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+
+	"github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/v1util"
+)
+
+// WithRawConfigFile defines the subset of v1.Image used by these helper methods
+type WithRawConfigFile interface {
+	// RawConfigFile returns the serialized bytes of this image's config file.
+	RawConfigFile() ([]byte, error)
+}
+
+// ConfigFile is a helper for implementing v1.Image
+func ConfigFile(i WithRawConfigFile) (*v1.ConfigFile, error) {
+	b, err := i.RawConfigFile()
+	if err != nil {
+		return nil, err
+	}
+	return v1.ParseConfigFile(bytes.NewReader(b))
+}
+
+// ConfigName is a helper for implementing v1.Image
+func ConfigName(i WithRawConfigFile) (v1.Hash, error) {
+	b, err := i.RawConfigFile()
+	if err != nil {
+		return v1.Hash{}, err
+	}
+	h, _, err := v1.SHA256(bytes.NewReader(b))
+	return h, err
+}
+
+// configLayer implements v1.Layer from the raw config bytes.
+// This is so that clients (e.g. remote) can access the config as a blob.
+type configLayer struct {
+	hash    v1.Hash
+	content []byte
+}
+
+// Digest implements v1.Layer
+func (cl *configLayer) Digest() (v1.Hash, error) {
+	return cl.hash, nil
+}
+
+// DiffID implements v1.Layer
+func (cl *configLayer) DiffID() (v1.Hash, error) {
+	return cl.hash, nil
+}
+
+// Uncompressed implements v1.Layer
+func (cl *configLayer) Uncompressed() (io.ReadCloser, error) {
+	return v1util.NopReadCloser(bytes.NewBuffer(cl.content)), nil
+}
+
+// Compressed implements v1.Layer
+func (cl *configLayer) Compressed() (io.ReadCloser, error) {
+	return v1util.NopReadCloser(bytes.NewBuffer(cl.content)), nil
+}
+
+// Size implements v1.Layer
+func (cl *configLayer) Size() (int64, error) {
+	return int64(len(cl.content)), nil
+}
+
+var _ v1.Layer = (*configLayer)(nil)
+
+// ConfigLayer returns a v1.Layer that wraps the image's raw config bytes.
+func ConfigLayer(i WithRawConfigFile) (v1.Layer, error) {
+	h, err := ConfigName(i)
+	if err != nil {
+		return nil, err
+	}
+	rcfg, err := i.RawConfigFile()
+	if err != nil {
+		return nil, err
+	}
+	return &configLayer{
+		hash:    h,
+		content: rcfg,
+	}, nil
+}
+
+// WithConfigFile defines the subset of v1.Image used by these helper methods
+type WithConfigFile interface {
+	// ConfigFile returns this image's config file.
+	ConfigFile() (*v1.ConfigFile, error)
+}
+
+// DiffIDs is a helper for implementing v1.Image
+func DiffIDs(i WithConfigFile) ([]v1.Hash, error) {
+	cfg, err := i.ConfigFile()
+	if err != nil {
+		return nil, err
+	}
+	return cfg.RootFS.DiffIDs, nil
+}
+
+// RawConfigFile is a helper for implementing v1.Image
+func RawConfigFile(i WithConfigFile) ([]byte, error) {
+	cfg, err := i.ConfigFile()
+	if err != nil {
+		return nil, err
+	}
+	return json.Marshal(cfg)
+}
+
+// WithUncompressedLayer defines the subset of v1.Image used by these helper methods
+type WithUncompressedLayer interface {
+	// UncompressedLayer is like UncompressedBlob, but takes the "diff id".
+	UncompressedLayer(v1.Hash) (io.ReadCloser, error)
+}
+
+// Layer is the same as Blob, but takes the "diff id".
+func Layer(wul WithUncompressedLayer, h v1.Hash) (io.ReadCloser, error) {
+	rc, err := wul.UncompressedLayer(h)
+	if err != nil {
+		return nil, err
+	}
+	return v1util.GzipReadCloser(rc)
+}
+
+// WithRawManifest defines the subset of v1.Image used by these helper methods
+type WithRawManifest interface {
+	// RawManifest returns the serialized bytes of this image's manifest.
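+	// (Hashing these bytes with SHA-256 is what produces the image's digest;
+	// see the Digest helper below.)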
+ RawManifest() ([]byte, error) +} + +// Digest is a helper for implementing v1.Image +func Digest(i WithRawManifest) (v1.Hash, error) { + mb, err := i.RawManifest() + if err != nil { + return v1.Hash{}, err + } + digest, _, err := v1.SHA256(bytes.NewReader(mb)) + return digest, err +} + +// Manifest is a helper for implementing v1.Image +func Manifest(i WithRawManifest) (*v1.Manifest, error) { + b, err := i.RawManifest() + if err != nil { + return nil, err + } + return v1.ParseManifest(bytes.NewReader(b)) +} + +// WithManifest defines the subset of v1.Image used by these helper methods +type WithManifest interface { + // Manifest returns this image's Manifest object. + Manifest() (*v1.Manifest, error) +} + +// RawManifest is a helper for implementing v1.Image +func RawManifest(i WithManifest) ([]byte, error) { + m, err := i.Manifest() + if err != nil { + return nil, err + } + return json.Marshal(m) +} + +// FSLayers is a helper for implementing v1.Image +func FSLayers(i WithManifest) ([]v1.Hash, error) { + m, err := i.Manifest() + if err != nil { + return nil, err + } + fsl := make([]v1.Hash, len(m.Layers)) + for i, l := range m.Layers { + fsl[i] = l.Digest + } + return fsl, nil +} + +// BlobSet is a helper for implementing v1.Image +func BlobSet(i WithManifest) (map[v1.Hash]struct{}, error) { + m, err := i.Manifest() + if err != nil { + return nil, err + } + bs := make(map[v1.Hash]struct{}) + for _, l := range m.Layers { + bs[l.Digest] = struct{}{} + } + bs[m.Config.Digest] = struct{}{} + return bs, nil +} + +// BlobSize is a helper for implementing v1.Image +func BlobSize(i WithManifest, h v1.Hash) (int64, error) { + m, err := i.Manifest() + if err != nil { + return -1, err + } + for _, l := range m.Layers { + if l.Digest == h { + return l.Size, nil + } + } + return -1, fmt.Errorf("blob %v not found", h) +} + +// WithManifestAndConfigFile defines the subset of v1.Image used by these helper methods +type WithManifestAndConfigFile interface { + WithConfigFile + + // Manifest returns this image's Manifest object. + Manifest() (*v1.Manifest, error) +} + +// BlobToDiffID is a helper for mapping between compressed +// and uncompressed blob hashes. +func BlobToDiffID(i WithManifestAndConfigFile, h v1.Hash) (v1.Hash, error) { + blobs, err := FSLayers(i) + if err != nil { + return v1.Hash{}, err + } + diffIDs, err := DiffIDs(i) + if err != nil { + return v1.Hash{}, err + } + if len(blobs) != len(diffIDs) { + return v1.Hash{}, fmt.Errorf("mismatched fs layers (%d) and diff ids (%d)", len(blobs), len(diffIDs)) + } + for i, blob := range blobs { + if blob == h { + return diffIDs[i], nil + } + } + return v1.Hash{}, fmt.Errorf("unknown blob %v", h) +} + +// DiffIDToBlob is a helper for mapping between uncompressed +// and compressed blob hashes. +func DiffIDToBlob(wm WithManifestAndConfigFile, h v1.Hash) (v1.Hash, error) { + blobs, err := FSLayers(wm) + if err != nil { + return v1.Hash{}, err + } + diffIDs, err := DiffIDs(wm) + if err != nil { + return v1.Hash{}, err + } + if len(blobs) != len(diffIDs) { + return v1.Hash{}, fmt.Errorf("mismatched fs layers (%d) and diff ids (%d)", len(blobs), len(diffIDs)) + } + for i, diffID := range diffIDs { + if diffID == h { + return blobs[i], nil + } + } + return v1.Hash{}, fmt.Errorf("unknown diffID %v", h) + +} + +// WithBlob defines the subset of v1.Image used by these helper methods +type WithBlob interface { + // Blob returns a ReadCloser for streaming the blob's content. 
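+	// (The bytes are the blob as stored in the registry, i.e. typically
+	// gzip-compressed; UncompressedBlob gunzips them.)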
+ Blob(v1.Hash) (io.ReadCloser, error) +} + +// UncompressedBlob returns a ReadCloser for streaming the blob's content uncompressed. +func UncompressedBlob(b WithBlob, h v1.Hash) (io.ReadCloser, error) { + rc, err := b.Blob(h) + if err != nil { + return nil, err + } + return v1util.GunzipReadCloser(rc) +} + +// WithDiffID defines the subset of v1.Layer for exposing the DiffID method. +type WithDiffID interface { + DiffID() (v1.Hash, error) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/platform.go b/vendor/github.com/google/go-containerregistry/pkg/v1/platform.go new file mode 100644 index 00000000..df9b2959 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/platform.go @@ -0,0 +1,24 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +// Platform represents the target os/arch for an image. +type Platform struct { + Architecture string `json:"architecture"` + OS string `json:"os"` + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` + Variant string `json:"variant,omitempty"` +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go new file mode 100644 index 00000000..2032e276 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go @@ -0,0 +1,64 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/url" + + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote/transport" +) + +// Delete removes the specified image reference from the remote registry. 
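+//
+// A minimal usage sketch (assuming ref came from name.NewTag or similar, and
+// that the registry permits deletion):
+//
+//	if err := remote.Delete(ref, authn.Anonymous, http.DefaultTransport); err != nil {
+//		log.Fatalf("delete failed: %v", err)
+//	}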
+func Delete(ref name.Reference, auth authn.Authenticator, t http.RoundTripper) error { + scopes := []string{ref.Scope(transport.DeleteScope)} + tr, err := transport.New(ref.Context().Registry, auth, t, scopes) + if err != nil { + return err + } + c := &http.Client{Transport: tr} + + u := url.URL{ + Scheme: ref.Context().Registry.Scheme(), + Host: ref.Context().RegistryStr(), + Path: fmt.Sprintf("/v2/%s/manifests/%s", ref.Context().RepositoryStr(), ref.Identifier()), + } + + req, err := http.NewRequest(http.MethodDelete, u.String(), nil) + if err != nil { + return err + } + + resp, err := c.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK, http.StatusAccepted: + return nil + default: + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + return fmt.Errorf("unrecognized status code during DELETE: %v; %v", resp.Status, string(b)) + } +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/doc.go new file mode 100644 index 00000000..846ba07c --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/doc.go @@ -0,0 +1,17 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package remote provides facilities for reading/writing v1.Images from/to +// a remote image registry. +package remote diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/error.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/error.go new file mode 100644 index 00000000..07627482 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/error.go @@ -0,0 +1,106 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package remote
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+)
+
+// Error implements error to support the following error specification:
+// https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors
+type Error struct {
+	Errors []Diagnostic `json:"errors,omitempty"`
+}
+
+// Check that Error implements error
+var _ error = (*Error)(nil)
+
+// Error implements error
+func (e *Error) Error() string {
+	switch len(e.Errors) {
+	case 0:
+		return ""
+	case 1:
+		return e.Errors[0].String()
+	default:
+		var errors []string
+		for _, d := range e.Errors {
+			errors = append(errors, d.String())
+		}
+		return fmt.Sprintf("multiple errors returned: %s",
+			strings.Join(errors, ";"))
+	}
+}
+
+// Diagnostic represents a single error returned by a Docker registry interaction.
+type Diagnostic struct {
+	Code    ErrorCode   `json:"code"`
+	Message string      `json:"message,omitempty"`
+	Detail  interface{} `json:"detail,omitempty"`
+}
+
+// String stringifies the Diagnostic
+func (d Diagnostic) String() string {
+	return fmt.Sprintf("%s: %q", d.Code, d.Message)
+}
+
+// ErrorCode is an enumeration of supported error codes.
+type ErrorCode string
+
+// The set of error conditions a registry may return:
+// https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors-2
+const (
+	BlobUnknownErrorCode         ErrorCode = "BLOB_UNKNOWN"
+	BlobUploadInvalidErrorCode   ErrorCode = "BLOB_UPLOAD_INVALID"
+	BlobUploadUnknownErrorCode   ErrorCode = "BLOB_UPLOAD_UNKNOWN"
+	DigestInvalidErrorCode       ErrorCode = "DIGEST_INVALID"
+	ManifestBlobUnknownErrorCode ErrorCode = "MANIFEST_BLOB_UNKNOWN"
+	ManifestInvalidErrorCode     ErrorCode = "MANIFEST_INVALID"
+	ManifestUnknownErrorCode     ErrorCode = "MANIFEST_UNKNOWN"
+	ManifestUnverifiedErrorCode  ErrorCode = "MANIFEST_UNVERIFIED"
+	NameInvalidErrorCode         ErrorCode = "NAME_INVALID"
+	NameUnknownErrorCode         ErrorCode = "NAME_UNKNOWN"
+	SizeInvalidErrorCode         ErrorCode = "SIZE_INVALID"
+	TagInvalidErrorCode          ErrorCode = "TAG_INVALID"
+	UnauthorizedErrorCode        ErrorCode = "UNAUTHORIZED"
+	DeniedErrorCode              ErrorCode = "DENIED"
+	UnsupportedErrorCode         ErrorCode = "UNSUPPORTED"
+)
+
+// CheckError returns nil if the response status matches one of the given
+// codes; otherwise it returns the registry's error, structured if possible.
+func CheckError(resp *http.Response, codes ...int) error {
+	for _, code := range codes {
+		if resp.StatusCode == code {
+			// This is one of the supported status codes.
+			return nil
+		}
+	}
+	b, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return err
+	}
+
+	// https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors
+	var structuredError Error
+	if err := json.Unmarshal(b, &structuredError); err != nil {
+		// If the response isn't a structured error, then return some
+		// reasonable error response containing the response body.
+		return fmt.Errorf("unsupported status code %d; body: %s", resp.StatusCode, string(b))
+	}
+	return &structuredError
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go
new file mode 100644
index 00000000..1c963ec8
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go
@@ -0,0 +1,246 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "sync" + + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/remote/transport" + "github.com/google/go-containerregistry/pkg/v1/types" + "github.com/google/go-containerregistry/pkg/v1/v1util" +) + +// remoteImage accesses an image from a remote registry +type remoteImage struct { + ref name.Reference + client *http.Client + manifestLock sync.Mutex // Protects manifest + manifest []byte + configLock sync.Mutex // Protects config + config []byte +} + +type ImageOption func(*imageOpener) error + +var _ partial.CompressedImageCore = (*remoteImage)(nil) + +type imageOpener struct { + auth authn.Authenticator + transport http.RoundTripper + ref name.Reference + client *http.Client +} + +func (i *imageOpener) Open() (v1.Image, error) { + tr, err := transport.New(i.ref.Context().Registry, i.auth, i.transport, []string{i.ref.Scope(transport.PullScope)}) + if err != nil { + return nil, err + } + ri := &remoteImage{ + ref: i.ref, + client: &http.Client{Transport: tr}, + } + imgCore, err := partial.CompressedToImage(ri) + if err != nil { + return imgCore, err + } + // Wrap the v1.Layers returned by this v1.Image in a hint for downstream + // remote.Write calls to facilitate cross-repo "mounting". + return &mountableImage{ + Image: imgCore, + Reference: i.ref, + }, nil +} + +// Image provides access to a remote image reference, applying functional options +// to the underlying imageOpener before resolving the reference into a v1.Image. +func Image(ref name.Reference, options ...ImageOption) (v1.Image, error) { + img := &imageOpener{ + auth: authn.Anonymous, + transport: http.DefaultTransport, + ref: ref, + } + + for _, option := range options { + if err := option(img); err != nil { + return nil, err + } + } + return img.Open() +} + +func (r *remoteImage) url(resource, identifier string) url.URL { + return url.URL{ + Scheme: r.ref.Context().Registry.Scheme(), + Host: r.ref.Context().RegistryStr(), + Path: fmt.Sprintf("/v2/%s/%s/%s", r.ref.Context().RepositoryStr(), resource, identifier), + } +} + +func (r *remoteImage) MediaType() (types.MediaType, error) { + // TODO(jonjohnsonjr): Determine this based on response. + return types.DockerManifestSchema2, nil +} + +// TODO(jonjohnsonjr): Handle manifest lists. +func (r *remoteImage) RawManifest() ([]byte, error) { + r.manifestLock.Lock() + defer r.manifestLock.Unlock() + if r.manifest != nil { + return r.manifest, nil + } + + u := r.url("manifests", r.ref.Identifier()) + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, err + } + // TODO(jonjohnsonjr): Accept OCI manifest, manifest list, and image index. 
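+	// Until then, we only advertise the Docker schema 2 media type, matching
+	// what MediaType above reports.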
+ req.Header.Set("Accept", string(types.DockerManifestSchema2)) + resp, err := r.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if err := CheckError(resp, http.StatusOK); err != nil { + return nil, err + } + + manifest, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + digest, _, err := v1.SHA256(bytes.NewReader(manifest)) + if err != nil { + return nil, err + } + + // Validate the digest matches what we asked for, if pulling by digest. + if dgst, ok := r.ref.(name.Digest); ok { + if digest.String() != dgst.DigestStr() { + return nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), r.ref) + } + } else { + // Do nothing for tags; I give up. + // + // We'd like to validate that the "Docker-Content-Digest" header matches what is returned by the registry, + // but so many registries implement this incorrectly that it's not worth checking. + // + // For reference: + // https://github.com/docker/distribution/issues/2395 + // https://github.com/GoogleContainerTools/kaniko/issues/298 + } + + r.manifest = manifest + return r.manifest, nil +} + +func (r *remoteImage) RawConfigFile() ([]byte, error) { + r.configLock.Lock() + defer r.configLock.Unlock() + if r.config != nil { + return r.config, nil + } + + m, err := partial.Manifest(r) + if err != nil { + return nil, err + } + + cl, err := r.LayerByDigest(m.Config.Digest) + if err != nil { + return nil, err + } + body, err := cl.Compressed() + if err != nil { + return nil, err + } + defer body.Close() + + r.config, err = ioutil.ReadAll(body) + if err != nil { + return nil, err + } + return r.config, nil +} + +// remoteLayer implements partial.CompressedLayer +type remoteLayer struct { + ri *remoteImage + digest v1.Hash +} + +// Digest implements partial.CompressedLayer +func (rl *remoteLayer) Digest() (v1.Hash, error) { + return rl.digest, nil +} + +// Compressed implements partial.CompressedLayer +func (rl *remoteLayer) Compressed() (io.ReadCloser, error) { + u := rl.ri.url("blobs", rl.digest.String()) + resp, err := rl.ri.client.Get(u.String()) + if err != nil { + return nil, err + } + + if err := CheckError(resp, http.StatusOK); err != nil { + resp.Body.Close() + return nil, err + } + + return v1util.VerifyReadCloser(resp.Body, rl.digest) +} + +// Manifest implements partial.WithManifest so that we can use partial.BlobSize below. +func (rl *remoteLayer) Manifest() (*v1.Manifest, error) { + return partial.Manifest(rl.ri) +} + +// Size implements partial.CompressedLayer +func (rl *remoteLayer) Size() (int64, error) { + // Look up the size of this digest in the manifest to avoid a request. + return partial.BlobSize(rl, rl.digest) +} + +// ConfigFile implements partial.WithManifestAndConfigFile so that we can use partial.BlobToDiffID below. +func (rl *remoteLayer) ConfigFile() (*v1.ConfigFile, error) { + return partial.ConfigFile(rl.ri) +} + +// DiffID implements partial.WithDiffID so that we don't recompute a DiffID that we already have +// available in our ConfigFile. 
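+// This is the WithDiffID fast path that compressedLayerExtender.DiffID in the
+// partial package delegates to via a type assertion.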
+func (rl *remoteLayer) DiffID() (v1.Hash, error) { + return partial.BlobToDiffID(rl, rl.digest) +} + +// LayerByDigest implements partial.CompressedLayer +func (r *remoteImage) LayerByDigest(h v1.Hash) (partial.CompressedLayer, error) { + return &remoteLayer{ + ri: r, + digest: h, + }, nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go new file mode 100644 index 00000000..17c00b5e --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go @@ -0,0 +1,64 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote/transport" +) + +type Tags struct { + Name string `json:"name"` + Tags []string `json:"tags"` +} + +// TODO(jonjohnsonjr): return []name.Tag? +func List(repo name.Repository, auth authn.Authenticator, t http.RoundTripper) ([]string, error) { + scopes := []string{repo.Scope(transport.PullScope)} + tr, err := transport.New(repo.Registry, auth, t, scopes) + if err != nil { + return nil, err + } + + uri := url.URL{ + Scheme: repo.Registry.Scheme(), + Host: repo.Registry.RegistryStr(), + Path: fmt.Sprintf("/v2/%s/tags/list", repo.RepositoryStr()), + } + + client := http.Client{Transport: tr} + resp, err := client.Get(uri.String()) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if err := CheckError(resp, http.StatusOK); err != nil { + return nil, err + } + + tags := Tags{} + if err := json.NewDecoder(resp.Body).Decode(&tags); err != nil { + return nil, err + } + + return tags.Tags, nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go new file mode 100644 index 00000000..13b79064 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go @@ -0,0 +1,77 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package remote
+
+import (
+	"github.com/google/go-containerregistry/pkg/name"
+	"github.com/google/go-containerregistry/pkg/v1"
+)
+
+// MountableLayer wraps a v1.Layer in a shim that enables the layer to be
+// "mounted" when published to another registry.
+type MountableLayer struct {
+	v1.Layer
+
+	Reference name.Reference
+}
+
+// mountableImage wraps the v1.Layer references returned by the embedded v1.Image
+// in MountableLayers so that remote.Write might attempt to mount them from their
+// source repository.
+type mountableImage struct {
+	v1.Image
+
+	Reference name.Reference
+}
+
+// Layers implements v1.Image
+func (mi *mountableImage) Layers() ([]v1.Layer, error) {
+	ls, err := mi.Image.Layers()
+	if err != nil {
+		return nil, err
+	}
+	mls := make([]v1.Layer, 0, len(ls))
+	for _, l := range ls {
+		mls = append(mls, &MountableLayer{
+			Layer:     l,
+			Reference: mi.Reference,
+		})
+	}
+	return mls, nil
+}
+
+// LayerByDigest implements v1.Image
+func (mi *mountableImage) LayerByDigest(d v1.Hash) (v1.Layer, error) {
+	l, err := mi.Image.LayerByDigest(d)
+	if err != nil {
+		return nil, err
+	}
+	return &MountableLayer{
+		Layer:     l,
+		Reference: mi.Reference,
+	}, nil
+}
+
+// LayerByDiffID implements v1.Image
+func (mi *mountableImage) LayerByDiffID(d v1.Hash) (v1.Layer, error) {
+	l, err := mi.Image.LayerByDiffID(d)
+	if err != nil {
+		return nil, err
+	}
+	return &MountableLayer{
+		Layer:     l,
+		Reference: mi.Reference,
+	}, nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go
new file mode 100644
index 00000000..a6e9584e
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go
@@ -0,0 +1,56 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package remote + +import ( + "log" + "net/http" + + "github.com/google/go-containerregistry/pkg/authn" +) + +// WithTransport is a functional option for overriding the default transport +// on a remote image +func WithTransport(t http.RoundTripper) ImageOption { + return func(i *imageOpener) error { + i.transport = t + return nil + } +} + +// WithAuth is a functional option for overriding the default authenticator +// on a remote image +func WithAuth(auth authn.Authenticator) ImageOption { + return func(i *imageOpener) error { + i.auth = auth + return nil + } +} + +// WithAuthFromKeychain is a functional option for overriding the default +// authenticator on a remote image using an authn.Keychain +func WithAuthFromKeychain(keys authn.Keychain) ImageOption { + return func(i *imageOpener) error { + auth, err := keys.Resolve(i.ref.Context().Registry) + if err != nil { + return err + } + if auth == authn.Anonymous { + log.Println("No matching credentials were found, falling back on anonymous") + } + i.auth = auth + return nil + } +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go new file mode 100644 index 00000000..e77f47f6 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go @@ -0,0 +1,48 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "net/http" + + "github.com/google/go-containerregistry/pkg/authn" +) + +type basicTransport struct { + inner http.RoundTripper + auth authn.Authenticator + target string +} + +var _ http.RoundTripper = (*basicTransport)(nil) + +// RoundTrip implements http.RoundTripper +func (bt *basicTransport) RoundTrip(in *http.Request) (*http.Response, error) { + hdr, err := bt.auth.Authorization() + if err != nil { + return nil, err + } + + // http.Client handles redirects at a layer above the http.RoundTripper + // abstraction, so to avoid forwarding Authorization headers to places + // we are redirected, only set it when the authorization header matches + // the host with which we are interacting. + // In case of redirect http.Client can use an empty Host, check URL too. + if in.Host == bt.target || in.URL.Host == bt.target { + in.Header.Set("Authorization", hdr) + } + in.Header.Set("User-Agent", transportName) + return bt.inner.RoundTrip(in) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go new file mode 100644 index 00000000..2bfdb6e2 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go @@ -0,0 +1,135 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "fmt" + + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" +) + +type bearerTransport struct { + // Wrapped by bearerTransport. + inner http.RoundTripper + // Basic credentials that we exchange for bearer tokens. + basic authn.Authenticator + // Holds the bearer response from the token service. + bearer *authn.Bearer + // Registry to which we send bearer tokens. + registry name.Registry + // See https://tools.ietf.org/html/rfc6750#section-3 + realm string + // See https://docs.docker.com/registry/spec/auth/token/ + service string + scopes []string +} + +var _ http.RoundTripper = (*bearerTransport)(nil) + +// RoundTrip implements http.RoundTripper +func (bt *bearerTransport) RoundTrip(in *http.Request) (*http.Response, error) { + sendRequest := func() (*http.Response, error) { + hdr, err := bt.bearer.Authorization() + if err != nil { + return nil, err + } + + // http.Client handles redirects at a layer above the http.RoundTripper + // abstraction, so to avoid forwarding Authorization headers to places + // we are redirected, only set it when the authorization header matches + // the registry with which we are interacting. + // In case of redirect http.Client can use an empty Host, check URL too. + if in.Host == bt.registry.RegistryStr() || in.URL.Host == bt.registry.RegistryStr() { + in.Header.Set("Authorization", hdr) + } + in.Header.Set("User-Agent", transportName) + return bt.inner.RoundTrip(in) + } + + res, err := sendRequest() + if err != nil { + return nil, err + } + + // Perform a token refresh() and retry the request in case the token has expired + if res.StatusCode == http.StatusUnauthorized { + if err = bt.refresh(); err != nil { + return nil, err + } + return sendRequest() + } + + return res, err +} + +func (bt *bearerTransport) refresh() error { + u, err := url.Parse(bt.realm) + if err != nil { + return err + } + b := &basicTransport{ + inner: bt.inner, + auth: bt.basic, + target: u.Host, + } + client := http.Client{Transport: b} + + u.RawQuery = url.Values{ + "scope": bt.scopes, + "service": []string{bt.service}, + }.Encode() + + resp, err := client.Get(u.String()) + if err != nil { + return err + } + defer resp.Body.Close() + + content, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + + // Some registries don't have "token" in the response. See #54. + type tokenResponse struct { + Token string `json:"token"` + AccessToken string `json:"access_token"` + } + + var response tokenResponse + if err := json.Unmarshal(content, &response); err != nil { + return err + } + + // Find a token to turn into a Bearer authenticator + var bearer authn.Bearer + if response.Token != "" { + bearer = authn.Bearer{Token: response.Token} + } else if response.AccessToken != "" { + bearer = authn.Bearer{Token: response.AccessToken} + } else { + return fmt.Errorf("no token in bearer response:\n%s", content) + } + + // Replace our old bearer authenticator (if we had one) with our newly refreshed authenticator. 
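+	// (RoundTrip retries the original request with this token after a 401.)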
+ bt.bearer = &bearer + return nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/doc.go new file mode 100644 index 00000000..ff7025b5 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/doc.go @@ -0,0 +1,18 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package transport provides facilities for setting up an authenticated +// http.RoundTripper given an Authenticator and base RoundTripper. See +// transport.New for more information. +package transport diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go new file mode 100644 index 00000000..89133e32 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go @@ -0,0 +1,93 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "fmt" + "net/http" + "strings" + + "github.com/google/go-containerregistry/pkg/name" +) + +type challenge string + +const ( + anonymous challenge = "anonymous" + basic challenge = "basic" + bearer challenge = "bearer" +) + +type pingResp struct { + challenge challenge + + // Following the challenge there are often key/value pairs + // e.g. Bearer service="gcr.io",realm="https://auth.gcr.io/v36/tokenz" + parameters map[string]string +} + +func (c challenge) Canonical() challenge { + return challenge(strings.ToLower(string(c))) +} + +func parseChallenge(suffix string) map[string]string { + kv := make(map[string]string) + for _, token := range strings.Split(suffix, ",") { + // Trim any whitespace around each token. + token = strings.Trim(token, " ") + + // Break the token into a key/value pair + if parts := strings.SplitN(token, "=", 2); len(parts) == 2 { + // Unquote the value, if it is quoted. 
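+			// e.g. service="gcr.io" becomes kv["service"] = gcr.io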
+			kv[parts[0]] = strings.Trim(parts[1], `"`)
+		} else {
+			// If there was only one part, treat it as a key with an empty value
+			kv[token] = ""
+		}
+	}
+	return kv
+}
+
+func ping(reg name.Registry, t http.RoundTripper) (*pingResp, error) {
+	client := http.Client{Transport: t}
+
+	url := fmt.Sprintf("%s://%s/v2/", reg.Scheme(), reg.Name())
+	resp, err := client.Get(url)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	switch resp.StatusCode {
+	case http.StatusOK:
+		// If we get a 200, then no authentication is needed.
+		return &pingResp{challenge: anonymous}, nil
+	case http.StatusUnauthorized:
+		wac := resp.Header.Get(http.CanonicalHeaderKey("WWW-Authenticate"))
+		if parts := strings.SplitN(wac, " ", 2); len(parts) == 2 {
+			// If there are two parts, then parse the challenge parameters.
+			return &pingResp{
+				challenge:  challenge(parts[0]).Canonical(),
+				parameters: parseChallenge(parts[1]),
+			}, nil
+		}
+		// Otherwise, just return the challenge without parameters.
+		return &pingResp{
+			challenge: challenge(wac).Canonical(),
+		}, nil
+	default:
+		return nil, fmt.Errorf("unrecognized HTTP status: %v", resp.Status)
+	}
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/scope.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/scope.go
new file mode 100644
index 00000000..c3b56f7a
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/scope.go
@@ -0,0 +1,24 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+// Scopes suitable to qualify each Repository
+const (
+	PullScope string = "pull"
+	PushScope string = "push,pull"
+	// For now DELETE is PUSH, which is the read/write ACL.
+	DeleteScope  string = PushScope
+	CatalogScope string = "catalog"
+)
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go
new file mode 100644
index 00000000..6140ab2c
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go
@@ -0,0 +1,84 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/google/go-containerregistry/pkg/authn"
+	"github.com/google/go-containerregistry/pkg/name"
+)
+
+const (
+	transportName = "go-containerregistry"
+)
+
+// New returns a new RoundTripper based on the provided RoundTripper that has been
+// set up to authenticate with the remote registry "reg", in the capacity
+// laid out by the specified scopes.
+func New(reg name.Registry, auth authn.Authenticator, t http.RoundTripper, scopes []string) (http.RoundTripper, error) {
+	// The handshake:
+	//  1. Use "t" to ping() the registry for the authentication challenge.
+	//
+	//  2a. If we get back a 200, then simply use "t".
+	//
+	//  2b. If we get back a 401 with a Basic challenge, then use a transport
+	//      that just attaches auth to each round trip.
+	//
+	//  2c. If we get back a 401 with a Bearer challenge, then use a transport
+	//      that attaches a bearer token to each request, and refreshes it on
+	//      401s. Perform an initial refresh to seed the bearer token.
+
+	// First we ping the registry to determine the parameters of the authentication handshake
+	// (if one is even necessary).
+	pr, err := ping(reg, t)
+	if err != nil {
+		return nil, err
+	}
+
+	switch pr.challenge.Canonical() {
+	case anonymous:
+		return t, nil
+	case basic:
+		return &basicTransport{inner: t, auth: auth, target: reg.RegistryStr()}, nil
+	case bearer:
+		// We require the realm, which tells us where to send our Basic auth to turn it into Bearer auth.
+		realm, ok := pr.parameters["realm"]
+		if !ok {
+			return nil, fmt.Errorf("malformed www-authenticate, missing realm: %v", pr.parameters)
+		}
+		service, ok := pr.parameters["service"]
+		if !ok {
+			// If the service parameter is not specified, then default it to the registry
+			// with which we are talking.
+			service = reg.String()
+		}
+		bt := &bearerTransport{
+			inner:    t,
+			basic:    auth,
+			realm:    realm,
+			registry: reg,
+			service:  service,
+			scopes:   scopes,
+		}
+		if err := bt.refresh(); err != nil {
+			return nil, err
+		}
+		return bt, nil
+	default:
+		return nil, fmt.Errorf("Unrecognized challenge: %s", pr.challenge)
+	}
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go
new file mode 100644
index 00000000..7d5b5759
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go
@@ -0,0 +1,355 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"net/http"
+	"net/url"
+
+	"github.com/google/go-containerregistry/pkg/authn"
+	"github.com/google/go-containerregistry/pkg/name"
+	"github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/partial"
+	"github.com/google/go-containerregistry/pkg/v1/remote/transport"
+	"github.com/google/go-containerregistry/pkg/v1/stream"
+	"golang.org/x/sync/errgroup"
+)
+
+// Write pushes the provided img to the specified image reference.
+func Write(ref name.Reference, img v1.Image, auth authn.Authenticator, t http.RoundTripper) error {
+	ls, err := img.Layers()
+	if err != nil {
+		return err
+	}
+
+	scopes := scopesForUploadingImage(ref, ls)
+	tr, err := transport.New(ref.Context().Registry, auth, t, scopes)
+	if err != nil {
+		return err
+	}
+	w := writer{
+		ref:    ref,
+		client: &http.Client{Transport: tr},
+		img:    img,
+	}
+
+	// Upload individual layers in goroutines and collect any errors.
+	var g errgroup.Group
+	for _, l := range ls {
+		l := l
+		g.Go(func() error {
+			return w.uploadOne(l)
+		})
+	}
+	if err := g.Wait(); err != nil {
+		return err
+	}
+
+	// Now that all the layers are uploaded, upload the config file blob.
+	// This must be done last because some layers may have been streamed.
+	l, err := partial.ConfigLayer(img)
+	if err != nil {
+		return err
+	}
+	if err := w.uploadOne(l); err != nil {
+		return err
+	}
+
+	// With all of the constituent elements uploaded, upload the manifest
+	// to commit the image.
+	return w.commitImage()
+}
+
+// writer writes the elements of an image to a remote image reference.
+type writer struct {
+	ref    name.Reference
+	client *http.Client
+	img    v1.Image
+}
+
+// url returns a url.URL for the specified path in the context of this remote image reference.
+func (w *writer) url(path string) url.URL {
+	return url.URL{
+		Scheme: w.ref.Context().Registry.Scheme(),
+		Host:   w.ref.Context().RegistryStr(),
+		Path:   path,
+	}
+}
+
+// nextLocation extracts the fully-qualified URL to which we should send the next request in an upload sequence.
+func (w *writer) nextLocation(resp *http.Response) (string, error) {
+	loc := resp.Header.Get("Location")
+	if len(loc) == 0 {
+		return "", errors.New("missing Location header")
+	}
+	u, err := url.Parse(loc)
+	if err != nil {
+		return "", err
+	}
+
+	// If the location header returned is just a url path, then fully qualify it.
+	// We cannot simply call w.url, since there might be an embedded query string.
+	return resp.Request.URL.ResolveReference(u).String(), nil
+}
+
+// checkExisting checks if a blob exists already in the repository by making a
+// HEAD request to the blob store API. GCR performs an existence check on the
+// initiation if "mount" is specified, even if no "from" sources are specified.
+// However, this is not broadly applicable to all registries, e.g. ECR.
+func (w *writer) checkExisting(h v1.Hash) (bool, error) {
+	u := w.url(fmt.Sprintf("/v2/%s/blobs/%s", w.ref.Context().RepositoryStr(), h.String()))
+
+	resp, err := w.client.Head(u.String())
+	if err != nil {
+		return false, err
+	}
+	defer resp.Body.Close()
+
+	if err := CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil {
+		return false, err
+	}
+
+	return resp.StatusCode == http.StatusOK, nil
+}
+
+// initiateUpload initiates the blob upload, which starts with a POST that can
+// optionally include the hash of the layer and a list of repositories from
+// which that layer might be read. On failure, an error is returned.
+// On success, the layer was either mounted (nothing more to do) or a blob +// upload was initiated and the body of that blob should be sent to the returned +// location. +func (w *writer) initiateUpload(from, mount string) (location string, mounted bool, err error) { + u := w.url(fmt.Sprintf("/v2/%s/blobs/uploads/", w.ref.Context().RepositoryStr())) + uv := url.Values{} + if mount != "" { + uv["mount"] = []string{mount} + } + if from != "" { + uv["from"] = []string{from} + } + u.RawQuery = uv.Encode() + + // Make the request to initiate the blob upload. + resp, err := w.client.Post(u.String(), "application/json", nil) + if err != nil { + return "", false, err + } + defer resp.Body.Close() + + if err := CheckError(resp, http.StatusCreated, http.StatusAccepted); err != nil { + return "", false, err + } + + // Check the response code to determine the result. + switch resp.StatusCode { + case http.StatusCreated: + // We're done, we were able to fast-path. + return "", true, nil + case http.StatusAccepted: + // Proceed to PATCH, upload has begun. + loc, err := w.nextLocation(resp) + return loc, false, err + default: + panic("Unreachable: initiateUpload") + } +} + +// streamBlob streams the contents of the blob to the specified location. +// On failure, this will return an error. On success, this will return the location +// header indicating how to commit the streamed blob. +func (w *writer) streamBlob(blob io.ReadCloser, streamLocation string) (commitLocation string, err error) { + defer blob.Close() + + req, err := http.NewRequest(http.MethodPatch, streamLocation, blob) + if err != nil { + return "", err + } + + resp, err := w.client.Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + if err := CheckError(resp, http.StatusNoContent, http.StatusAccepted, http.StatusCreated); err != nil { + return "", err + } + + // The blob has been uploaded, return the location header indicating + // how to commit this layer. + return w.nextLocation(resp) +} + +// commitBlob commits this blob by sending a PUT to the location returned from +// streaming the blob. +func (w *writer) commitBlob(location, digest string) error { + u, err := url.Parse(location) + if err != nil { + return err + } + v := u.Query() + v.Set("digest", digest) + u.RawQuery = v.Encode() + + req, err := http.NewRequest(http.MethodPut, u.String(), nil) + if err != nil { + return err + } + + resp, err := w.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + return CheckError(resp, http.StatusCreated) +} + +// uploadOne performs a complete upload of a single layer. +func (w *writer) uploadOne(l v1.Layer) error { + var from, mount, digest string + if _, ok := l.(*stream.Layer); !ok { + // Layer isn't streamable, we should take advantage of that to + // skip uploading if possible. + // By sending ?digest= in the request, we'll also check that + // our computed digest matches the one computed by the + // registry. 
+		h, err := l.Digest()
+		if err != nil {
+			return err
+		}
+		digest = h.String()
+
+		existing, err := w.checkExisting(h)
+		if err != nil {
+			return err
+		}
+		if existing {
+			log.Printf("existing blob: %v", h)
+			return nil
+		}
+
+		mount = h.String()
+	}
+	if ml, ok := l.(*MountableLayer); ok {
+		if w.ref.Context().RegistryStr() == ml.Reference.Context().RegistryStr() {
+			from = ml.Reference.Context().RepositoryStr()
+		}
+	}
+
+	location, mounted, err := w.initiateUpload(from, mount)
+	if err != nil {
+		return err
+	} else if mounted {
+		h, err := l.Digest()
+		if err != nil {
+			return err
+		}
+		log.Printf("mounted blob: %s", h.String())
+		return nil
+	}
+
+	blob, err := l.Compressed()
+	if err != nil {
+		return err
+	}
+	location, err = w.streamBlob(blob, location)
+	if err != nil {
+		return err
+	}
+
+	h, err := l.Digest()
+	if err != nil {
+		return err
+	}
+	digest = h.String()
+
+	if err := w.commitBlob(location, digest); err != nil {
+		return err
+	}
+	log.Printf("pushed blob: %s", digest)
+	return nil
+}
+
+// commitImage does a PUT of the image's manifest.
+func (w *writer) commitImage() error {
+	raw, err := w.img.RawManifest()
+	if err != nil {
+		return err
+	}
+	mt, err := w.img.MediaType()
+	if err != nil {
+		return err
+	}
+
+	u := w.url(fmt.Sprintf("/v2/%s/manifests/%s", w.ref.Context().RepositoryStr(), w.ref.Identifier()))
+
+	// Make the request to PUT the serialized manifest.
+	req, err := http.NewRequest(http.MethodPut, u.String(), bytes.NewBuffer(raw))
+	if err != nil {
+		return err
+	}
+	req.Header.Set("Content-Type", string(mt))
+
+	resp, err := w.client.Do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	if err := CheckError(resp, http.StatusOK, http.StatusCreated, http.StatusAccepted); err != nil {
+		return err
+	}
+
+	digest, err := w.img.Digest()
+	if err != nil {
+		return err
+	}
+
+	// The image was successfully pushed!
+	log.Printf("%v: digest: %v size: %d", w.ref, digest, len(raw))
+	return nil
+}
+
+func scopesForUploadingImage(ref name.Reference, layers []v1.Layer) []string {
+	// Use a map as a set to remove duplicate scope strings.
+	scopeSet := map[string]struct{}{}
+
+	for _, l := range layers {
+		if ml, ok := l.(*MountableLayer); ok {
+			// We add the push scope for ref.Context() after the loop.
+			if ml.Reference.Context() != ref.Context() {
+				scopeSet[ml.Reference.Context().Scope(transport.PullScope)] = struct{}{}
+			}
+		}
+	}
+
+	scopes := make([]string, 0)
+	// Push scope should be the first element because a few registries just look at the first scope to determine access.
+	scopes = append(scopes, ref.Scope(transport.PushScope))
+
+	for scope := range scopeSet {
+		scopes = append(scopes, scope)
+	}
+
+	return scopes
+}
+
+// TODO(mattmoor): WriteIndex
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go
new file mode 100644
index 00000000..d1ec96b1
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go
@@ -0,0 +1,176 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stream + +import ( + "compress/gzip" + "crypto/sha256" + "encoding/hex" + "errors" + "hash" + "io" + + "github.com/google/go-containerregistry/pkg/v1" +) + +var ( + // ErrNotComputed is returned when the requested value is not yet + // computed because the stream has not been consumed yet. + ErrNotComputed = errors.New("value not computed until stream is consumed") + + // ErrConsumed is returned by Compressed when the underlying stream has + // already been consumed and closed. + ErrConsumed = errors.New("stream was already consumed") +) + +type Layer struct { + blob io.ReadCloser + consumed bool + + digest, diffID *v1.Hash + size int64 +} + +var _ v1.Layer = (*Layer)(nil) + +func NewLayer(rc io.ReadCloser) *Layer { return &Layer{blob: rc} } + +func (l *Layer) Digest() (v1.Hash, error) { + if l.digest == nil { + return v1.Hash{}, ErrNotComputed + } + return *l.digest, nil +} + +func (l *Layer) DiffID() (v1.Hash, error) { + if l.diffID == nil { + return v1.Hash{}, ErrNotComputed + } + return *l.diffID, nil +} + +func (l *Layer) Size() (int64, error) { + if l.size == 0 { + return 0, ErrNotComputed + } + return l.size, nil +} + +func (l *Layer) Uncompressed() (io.ReadCloser, error) { + return nil, errors.New("NYI: stream.Layer.Uncompressed is not implemented") +} + +func (l *Layer) Compressed() (io.ReadCloser, error) { + if l.consumed { + return nil, ErrConsumed + } + return newCompressedReader(l) +} + +type compressedReader struct { + closer io.Closer // original blob's Closer. + + h, zh hash.Hash // collects digests of compressed and uncompressed stream. + pr io.Reader + count *countWriter + + l *Layer // stream.Layer to update upon Close. +} + +func newCompressedReader(l *Layer) (*compressedReader, error) { + h := sha256.New() + zh := sha256.New() + count := &countWriter{} + + // gzip.Writer writes to the output stream via pipe, a hasher to + // capture compressed digest, and a countWriter to capture compressed + // size. + pr, pw := io.Pipe() + zw, err := gzip.NewWriterLevel(io.MultiWriter(pw, zh, count), gzip.BestSpeed) + if err != nil { + return nil, err + } + + cr := &compressedReader{ + closer: newMultiCloser(zw, l.blob), + pr: pr, + h: h, + zh: zh, + count: count, + l: l, + } + go func() { + if _, err := io.Copy(io.MultiWriter(h, zw), l.blob); err != nil { + pw.CloseWithError(err) + return + } + // Now close the compressed reader, to flush the gzip stream + // and calculate digest/diffID/size. This will cause pr to + // return EOF which will cause readers of the Compressed stream + // to finish reading. + pw.CloseWithError(cr.Close()) + }() + + return cr, nil +} + +func (cr *compressedReader) Read(b []byte) (int, error) { return cr.pr.Read(b) } + +func (cr *compressedReader) Close() error { + // Close the inner ReadCloser. 
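+	// (Closing the multiCloser closes the gzip writer before the underlying
+	// blob; that flushes any buffered compressed bytes into zh and count, so
+	// the digest and size computed below are final.)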
+ if err := cr.closer.Close(); err != nil { + return err + } + + diffID, err := v1.NewHash("sha256:" + hex.EncodeToString(cr.h.Sum(nil))) + if err != nil { + return err + } + cr.l.diffID = &diffID + + digest, err := v1.NewHash("sha256:" + hex.EncodeToString(cr.zh.Sum(nil))) + if err != nil { + return err + } + cr.l.digest = &digest + + cr.l.size = cr.count.n + cr.l.consumed = true + return nil +} + +// countWriter counts bytes written to it. +type countWriter struct{ n int64 } + +func (c *countWriter) Write(p []byte) (int, error) { + c.n += int64(len(p)) + return len(p), nil +} + +// multiCloser is a Closer that collects multiple Closers and Closes them in order. +type multiCloser []io.Closer + +var _ io.Closer = (multiCloser)(nil) + +func newMultiCloser(c ...io.Closer) multiCloser { return multiCloser(c) } + +func (m multiCloser) Close() error { + for _, c := range m { + if err := c.Close(); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go b/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go new file mode 100644 index 00000000..ddaf7196 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go @@ -0,0 +1,40 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +// MediaType is an enumeration of the supported mime types that an element of an image might have. +type MediaType string + +// The collection of known MediaType values. 
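+// Both OCI image-spec and Docker distribution flavors are listed, since a
+// registry may serve manifests and layers under either family of media types.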
+const (
+	OCIContentDescriptor           MediaType = "application/vnd.oci.descriptor.v1+json"
+	OCIImageIndex                  MediaType = "application/vnd.oci.image.index.v1+json"
+	OCIManifestSchema1             MediaType = "application/vnd.oci.image.manifest.v1+json"
+	OCIConfigJSON                  MediaType = "application/vnd.oci.image.config.v1+json"
+	OCILayer                       MediaType = "application/vnd.oci.image.layer.v1.tar+gzip"
+	OCIRestrictedLayer             MediaType = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip"
+	OCIUncompressedLayer           MediaType = "application/vnd.oci.image.layer.v1.tar"
+	OCIUncompressedRestrictedLayer MediaType = "application/vnd.oci.image.layer.nondistributable.v1.tar"
+
+	DockerManifestSchema1       MediaType = "application/vnd.docker.distribution.manifest.v1+json"
+	DockerManifestSchema1Signed MediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws"
+	DockerManifestSchema2       MediaType = "application/vnd.docker.distribution.manifest.v2+json"
+	DockerManifestList          MediaType = "application/vnd.docker.distribution.manifest.list.v2+json"
+	DockerLayer                 MediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip"
+	DockerConfigJSON            MediaType = "application/vnd.docker.container.image.v1+json"
+	DockerPluginConfig          MediaType = "application/vnd.docker.plugin.v1+json"
+	DockerForeignLayer          MediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
+	DockerUncompressedLayer     MediaType = "application/vnd.docker.image.rootfs.diff.tar"
+)
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/and_closer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/and_closer.go
new file mode 100644
index 00000000..0925f13d
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/and_closer.go
@@ -0,0 +1,47 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1util
+
+import (
+	"io"
+)
+
+// readAndCloser implements io.ReadCloser by reading from a particular io.Reader
+// and then calling the provided "Close()" method.
+type readAndCloser struct {
+	io.Reader
+	CloseFunc func() error
+}
+
+var _ io.ReadCloser = (*readAndCloser)(nil)
+
+// Close implements io.ReadCloser
+func (rac *readAndCloser) Close() error {
+	return rac.CloseFunc()
+}
+
+// writeAndCloser implements io.WriteCloser by writing to a particular io.Writer
+// and then calling the provided "Close()" method.
+type writeAndCloser struct {
+	io.Writer
+	CloseFunc func() error
+}
+
+var _ io.WriteCloser = (*writeAndCloser)(nil)
+
+// Close implements io.WriteCloser
+func (wac *writeAndCloser) Close() error {
+	return wac.CloseFunc()
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/nop.go b/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/nop.go
new file mode 100644
index 00000000..8ff288d9
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/nop.go
@@ -0,0 +1,40 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1util + +import ( + "io" +) + +func nop() error { + return nil +} + +// NopWriteCloser wraps the io.Writer as an io.WriteCloser with a Close() method that does nothing. +func NopWriteCloser(w io.Writer) io.WriteCloser { + return &writeAndCloser{ + Writer: w, + CloseFunc: nop, + } +} + +// NopReadCloser wraps the io.Reader as an io.ReadCloser with a Close() method that does nothing. +// This is technically redundant with ioutil.NopCloser, but provided for symmetry and clarity. +func NopReadCloser(r io.Reader) io.ReadCloser { + return &readAndCloser{ + Reader: r, + CloseFunc: nop, + } +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/verify.go b/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/verify.go new file mode 100644 index 00000000..7ebb9dde --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/verify.go @@ -0,0 +1,61 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1util + +import ( + "encoding/hex" + "fmt" + "hash" + "io" + + "github.com/google/go-containerregistry/pkg/v1" +) + +type verifyReader struct { + inner io.Reader + hasher hash.Hash + expected v1.Hash +} + +// Read implements io.Reader +func (vc *verifyReader) Read(b []byte) (int, error) { + n, err := vc.inner.Read(b) + if err == io.EOF { + got := hex.EncodeToString(vc.hasher.Sum(make([]byte, 0, vc.hasher.Size()))) + if want := vc.expected.Hex; got != want { + return n, fmt.Errorf("error verifying %s checksum; got %q, want %q", + vc.expected.Algorithm, got, want) + } + } + return n, err +} + +// VerifyReadCloser wraps the given io.ReadCloser to verify that its contents match +// the provided v1.Hash before io.EOF is returned. +func VerifyReadCloser(r io.ReadCloser, h v1.Hash) (io.ReadCloser, error) { + w, err := v1.Hasher(h.Algorithm) + if err != nil { + return nil, err + } + r2 := io.TeeReader(r, w) + return &readAndCloser{ + Reader: &verifyReader{ + inner: r2, + hasher: w, + expected: h, + }, + CloseFunc: r.Close, + }, nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/zip.go b/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/zip.go new file mode 100644 index 00000000..57514a4e --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/zip.go @@ -0,0 +1,80 @@ +// Copyright 2018 Google LLC All Rights Reserved. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1util
+
+import (
+	"bytes"
+	"compress/gzip"
+	"io"
+)
+
+var gzipMagicHeader = []byte{'\x1f', '\x8b'}
+
+// GzipReadCloser reads uncompressed input data from the io.ReadCloser and
+// returns an io.ReadCloser from which compressed data may be read.
+// This uses gzip.BestSpeed for the compression level.
+func GzipReadCloser(r io.ReadCloser) (io.ReadCloser, error) {
+	return GzipReadCloserLevel(r, gzip.BestSpeed)
+}
+
+// GzipReadCloserLevel reads uncompressed input data from the io.ReadCloser and
+// returns an io.ReadCloser from which compressed data may be read.
+// Refer to compress/gzip for the level:
+// https://golang.org/pkg/compress/gzip/#pkg-constants
+func GzipReadCloserLevel(r io.ReadCloser, level int) (io.ReadCloser, error) {
+	pr, pw := io.Pipe()
+
+	go func() {
+		defer pw.Close()
+		defer r.Close()
+
+		gw, _ := gzip.NewWriterLevel(pw, level)
+		defer gw.Close()
+
+		_, err := io.Copy(gw, r)
+		if err != nil {
+			pr.CloseWithError(err)
+		}
+	}()
+
+	return pr, nil
+}
+
+// GunzipReadCloser reads compressed input data from the io.ReadCloser and
+// returns an io.ReadCloser from which uncompressed data may be read.
+func GunzipReadCloser(r io.ReadCloser) (io.ReadCloser, error) {
+	gr, err := gzip.NewReader(r)
+	if err != nil {
+		return nil, err
+	}
+	return &readAndCloser{
+		Reader: gr,
+		CloseFunc: func() error {
+			if err := gr.Close(); err != nil {
+				return err
+			}
+			return r.Close()
+		},
+	}, nil
+}
+
+// IsGzipped detects whether the input stream is compressed.
+func IsGzipped(r io.Reader) (bool, error) {
+	magicHeader := make([]byte, 2)
+	if _, err := r.Read(magicHeader); err != nil {
+		return false, err
+	}
+	return bytes.Equal(magicHeader, gzipMagicHeader), nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go b/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go
new file mode 100644
index 00000000..3440b5e1
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go
@@ -0,0 +1,314 @@
+// +build !ignore_autogenerated
+
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Config) DeepCopyInto(out *Config) { + *out = *in + if in.Cmd != nil { + in, out := &in.Cmd, &out.Cmd + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Healthcheck != nil { + in, out := &in.Healthcheck, &out.Healthcheck + *out = new(HealthConfig) + (*in).DeepCopyInto(*out) + } + if in.Entrypoint != nil { + in, out := &in.Entrypoint, &out.Entrypoint + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.OnBuild != nil { + in, out := &in.OnBuild, &out.OnBuild + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make(map[string]struct{}, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ExposedPorts != nil { + in, out := &in.ExposedPorts, &out.ExposedPorts + *out = make(map[string]struct{}, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Shell != nil { + in, out := &in.Shell, &out.Shell + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. +func (in *Config) DeepCopy() *Config { + if in == nil { + return nil + } + out := new(Config) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigFile) DeepCopyInto(out *ConfigFile) { + *out = *in + in.Created.DeepCopyInto(&out.Created) + if in.History != nil { + in, out := &in.History, &out.History + *out = make([]History, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.RootFS.DeepCopyInto(&out.RootFS) + in.Config.DeepCopyInto(&out.Config) + in.ContainerConfig.DeepCopyInto(&out.ContainerConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigFile. +func (in *ConfigFile) DeepCopy() *ConfigFile { + if in == nil { + return nil + } + out := new(ConfigFile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Descriptor) DeepCopyInto(out *Descriptor) { + *out = *in + out.Digest = in.Digest + if in.URLs != nil { + in, out := &in.URLs, &out.URLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Platform != nil { + in, out := &in.Platform, &out.Platform + *out = new(Platform) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Descriptor. +func (in *Descriptor) DeepCopy() *Descriptor { + if in == nil { + return nil + } + out := new(Descriptor) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Hash) DeepCopyInto(out *Hash) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Hash. 
+func (in *Hash) DeepCopy() *Hash { + if in == nil { + return nil + } + out := new(Hash) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthConfig) DeepCopyInto(out *HealthConfig) { + *out = *in + if in.Test != nil { + in, out := &in.Test, &out.Test + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthConfig. +func (in *HealthConfig) DeepCopy() *HealthConfig { + if in == nil { + return nil + } + out := new(HealthConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *History) DeepCopyInto(out *History) { + *out = *in + in.Created.DeepCopyInto(&out.Created) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new History. +func (in *History) DeepCopy() *History { + if in == nil { + return nil + } + out := new(History) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexManifest) DeepCopyInto(out *IndexManifest) { + *out = *in + if in.Manifests != nil { + in, out := &in.Manifests, &out.Manifests + *out = make([]Descriptor, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexManifest. +func (in *IndexManifest) DeepCopy() *IndexManifest { + if in == nil { + return nil + } + out := new(IndexManifest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Manifest) DeepCopyInto(out *Manifest) { + *out = *in + in.Config.DeepCopyInto(&out.Config) + if in.Layers != nil { + in, out := &in.Layers, &out.Layers + *out = make([]Descriptor, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Manifest. +func (in *Manifest) DeepCopy() *Manifest { + if in == nil { + return nil + } + out := new(Manifest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Platform) DeepCopyInto(out *Platform) { + *out = *in + if in.OSFeatures != nil { + in, out := &in.OSFeatures, &out.OSFeatures + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform. +func (in *Platform) DeepCopy() *Platform { + if in == nil { + return nil + } + out := new(Platform) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RootFS) DeepCopyInto(out *RootFS) {
+	*out = *in
+	if in.DiffIDs != nil {
+		in, out := &in.DiffIDs, &out.DiffIDs
+		*out = make([]Hash, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootFS.
+func (in *RootFS) DeepCopy() *RootFS {
+	if in == nil {
+		return nil
+	}
+	out := new(RootFS)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Time.
+func (in *Time) DeepCopy() *Time {
+	if in == nil {
+		return nil
+	}
+	out := new(Time)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE b/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE
new file mode 100644
index 00000000..14127cd8
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE
@@ -0,0 +1,9 @@
+(The MIT License)
+
+Copyright (c) 2017 marvin + konsorten GmbH (open-source@konsorten.de)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go
new file mode 100644
index 00000000..ef18d8f9
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go
@@ -0,0 +1,36 @@
+// +build windows
+
+package sequences
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+var (
+	kernel32Dll    *syscall.LazyDLL  = syscall.NewLazyDLL("Kernel32.dll")
+	setConsoleMode *syscall.LazyProc = kernel32Dll.NewProc("SetConsoleMode")
+)
+
+func EnableVirtualTerminalProcessing(stream syscall.Handle, enable bool) error {
+	const ENABLE_VIRTUAL_TERMINAL_PROCESSING uint32 = 0x4
+
+	var mode uint32
+	// Query the mode of the handle that was passed in rather than assuming stdout.
+	err := syscall.GetConsoleMode(stream, &mode)
+	if err != nil {
+		return err
+	}
+
+	if enable {
+		mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING
+	} else {
+		mode &^= ENABLE_VIRTUAL_TERMINAL_PROCESSING
+	}
+
+	ret, _, err := setConsoleMode.Call(uintptr(unsafe.Pointer(stream)), uintptr(mode))
+	if ret == 0 {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/sergi/go-diff/AUTHORS b/vendor/github.com/sergi/go-diff/AUTHORS
new file mode 100644
index 00000000..2d7bb2bf
--- /dev/null
+++ b/vendor/github.com/sergi/go-diff/AUTHORS
@@ -0,0 +1,25 @@
+# This is the official list of go-diff authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+ +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Danny Yoo +James Kolb +Jonathan Amsterdam +Markus Zimmermann +Matt Kovars +Örjan Persson +Osman Masood +Robert Carlsen +Rory Flynn +Sergi Mansilla +Shatrugna Sadhu +Shawn Smith +Stas Maksimov +Tor Arvid Lund +Zac Bergquist diff --git a/vendor/github.com/sergi/go-diff/CONTRIBUTORS b/vendor/github.com/sergi/go-diff/CONTRIBUTORS new file mode 100644 index 00000000..369e3d55 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/CONTRIBUTORS @@ -0,0 +1,32 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the go-diff +# repository. +# +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, ACME Inc. employees would be listed here +# but not in AUTHORS, because ACME Inc. would hold the copyright. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file. +# +# Names should be added to this file like so: +# Name +# +# Please keep the list sorted. + +Danny Yoo +James Kolb +Jonathan Amsterdam +Markus Zimmermann +Matt Kovars +Örjan Persson +Osman Masood +Robert Carlsen +Rory Flynn +Sergi Mansilla +Shatrugna Sadhu +Shawn Smith +Stas Maksimov +Tor Arvid Lund +Zac Bergquist diff --git a/vendor/github.com/sergi/go-diff/LICENSE b/vendor/github.com/sergi/go-diff/LICENSE new file mode 100644 index 00000000..937942c2 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2012-2016 The go-diff Authors. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go new file mode 100644 index 00000000..82ad7bc8 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go @@ -0,0 +1,1344 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. 
+// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "bytes" + "errors" + "fmt" + "html" + "math" + "net/url" + "regexp" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// Operation defines the operation of a diff item. +type Operation int8 + +const ( + // DiffDelete item represents a delete diff. + DiffDelete Operation = -1 + // DiffInsert item represents an insert diff. + DiffInsert Operation = 1 + // DiffEqual item represents an equal diff. + DiffEqual Operation = 0 +) + +// Diff represents one diff operation +type Diff struct { + Type Operation + Text string +} + +func splice(slice []Diff, index int, amount int, elements ...Diff) []Diff { + return append(slice[:index], append(elements, slice[index+amount:]...)...) +} + +// DiffMain finds the differences between two texts. +// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. +func (dmp *DiffMatchPatch) DiffMain(text1, text2 string, checklines bool) []Diff { + return dmp.DiffMainRunes([]rune(text1), []rune(text2), checklines) +} + +// DiffMainRunes finds the differences between two rune sequences. +// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. +func (dmp *DiffMatchPatch) DiffMainRunes(text1, text2 []rune, checklines bool) []Diff { + var deadline time.Time + if dmp.DiffTimeout > 0 { + deadline = time.Now().Add(dmp.DiffTimeout) + } + return dmp.diffMainRunes(text1, text2, checklines, deadline) +} + +func (dmp *DiffMatchPatch) diffMainRunes(text1, text2 []rune, checklines bool, deadline time.Time) []Diff { + if runesEqual(text1, text2) { + var diffs []Diff + if len(text1) > 0 { + diffs = append(diffs, Diff{DiffEqual, string(text1)}) + } + return diffs + } + // Trim off common prefix (speedup). + commonlength := commonPrefixLength(text1, text2) + commonprefix := text1[:commonlength] + text1 = text1[commonlength:] + text2 = text2[commonlength:] + + // Trim off common suffix (speedup). + commonlength = commonSuffixLength(text1, text2) + commonsuffix := text1[len(text1)-commonlength:] + text1 = text1[:len(text1)-commonlength] + text2 = text2[:len(text2)-commonlength] + + // Compute the diff on the middle block. + diffs := dmp.diffCompute(text1, text2, checklines, deadline) + + // Restore the prefix and suffix. + if len(commonprefix) != 0 { + diffs = append([]Diff{Diff{DiffEqual, string(commonprefix)}}, diffs...) + } + if len(commonsuffix) != 0 { + diffs = append(diffs, Diff{DiffEqual, string(commonsuffix)}) + } + + return dmp.DiffCleanupMerge(diffs) +} + +// diffCompute finds the differences between two rune slices. Assumes that the texts do not have any common prefix or suffix. +func (dmp *DiffMatchPatch) diffCompute(text1, text2 []rune, checklines bool, deadline time.Time) []Diff { + diffs := []Diff{} + if len(text1) == 0 { + // Just add some text (speedup). + return append(diffs, Diff{DiffInsert, string(text2)}) + } else if len(text2) == 0 { + // Just delete some text (speedup). + return append(diffs, Diff{DiffDelete, string(text1)}) + } + + var longtext, shorttext []rune + if len(text1) > len(text2) { + longtext = text1 + shorttext = text2 + } else { + longtext = text2 + shorttext = text1 + } + + if i := runesIndex(longtext, shorttext); i != -1 { + op := DiffInsert + // Swap insertions for deletions if diff is reversed. + if len(text1) > len(text2) { + op = DiffDelete + } + // Shorter text is inside the longer text (speedup). 
+		return []Diff{
+			Diff{op, string(longtext[:i])},
+			Diff{DiffEqual, string(shorttext)},
+			Diff{op, string(longtext[i+len(shorttext):])},
+		}
+	} else if len(shorttext) == 1 {
+		// Single character string.
+		// After the previous speedup, the character can't be an equality.
+		return []Diff{
+			Diff{DiffDelete, string(text1)},
+			Diff{DiffInsert, string(text2)},
+		}
+		// Check to see if the problem can be split in two.
+	} else if hm := dmp.diffHalfMatch(text1, text2); hm != nil {
+		// A half-match was found, sort out the return data.
+		text1A := hm[0]
+		text1B := hm[1]
+		text2A := hm[2]
+		text2B := hm[3]
+		midCommon := hm[4]
+		// Send both pairs off for separate processing.
+		diffsA := dmp.diffMainRunes(text1A, text2A, checklines, deadline)
+		diffsB := dmp.diffMainRunes(text1B, text2B, checklines, deadline)
+		// Merge the results.
+		return append(diffsA, append([]Diff{Diff{DiffEqual, string(midCommon)}}, diffsB...)...)
+	} else if checklines && len(text1) > 100 && len(text2) > 100 {
+		return dmp.diffLineMode(text1, text2, deadline)
+	}
+	return dmp.diffBisect(text1, text2, deadline)
+}
+
+// diffLineMode does a quick line-level diff on both []runes, then rediffs the parts for greater accuracy. This speedup can produce non-minimal diffs.
+func (dmp *DiffMatchPatch) diffLineMode(text1, text2 []rune, deadline time.Time) []Diff {
+	// Scan the text on a line-by-line basis first.
+	text1, text2, linearray := dmp.diffLinesToRunes(text1, text2)
+
+	diffs := dmp.diffMainRunes(text1, text2, false, deadline)
+
+	// Convert the diff back to original text.
+	diffs = dmp.DiffCharsToLines(diffs, linearray)
+	// Eliminate freak matches (e.g. blank lines).
+	diffs = dmp.DiffCleanupSemantic(diffs)
+
+	// Rediff any replacement blocks, this time character-by-character.
+	// Add a dummy entry at the end.
+	diffs = append(diffs, Diff{DiffEqual, ""})
+
+	pointer := 0
+	countDelete := 0
+	countInsert := 0
+
+	// NOTE: Rune slices are slower than using strings in this case.
+	textDelete := ""
+	textInsert := ""
+
+	for pointer < len(diffs) {
+		switch diffs[pointer].Type {
+		case DiffInsert:
+			countInsert++
+			textInsert += diffs[pointer].Text
+		case DiffDelete:
+			countDelete++
+			textDelete += diffs[pointer].Text
+		case DiffEqual:
+			// Upon reaching an equality, check for prior redundancies.
+			if countDelete >= 1 && countInsert >= 1 {
+				// Delete the offending records and add the merged ones.
+				diffs = splice(diffs, pointer-countDelete-countInsert,
+					countDelete+countInsert)
+
+				pointer = pointer - countDelete - countInsert
+				a := dmp.diffMainRunes([]rune(textDelete), []rune(textInsert), false, deadline)
+				for j := len(a) - 1; j >= 0; j-- {
+					diffs = splice(diffs, pointer, 0, a[j])
+				}
+				pointer = pointer + len(a)
+			}
+
+			countInsert = 0
+			countDelete = 0
+			textDelete = ""
+			textInsert = ""
+		}
+		pointer++
+	}
+
+	return diffs[:len(diffs)-1] // Remove the dummy entry at the end.
+}
+
+// DiffBisect finds the 'middle snake' of a diff, splits the problem in two and returns the recursively constructed diff.
+// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character.
+// See Myers 1986 paper: An O(ND) Difference Algorithm and Its Variations.
+func (dmp *DiffMatchPatch) DiffBisect(text1, text2 string, deadline time.Time) []Diff {
+	// Unused in this code, but retained for interface compatibility.
+ return dmp.diffBisect([]rune(text1), []rune(text2), deadline) +} + +// diffBisect finds the 'middle snake' of a diff, splits the problem in two and returns the recursively constructed diff. +// See Myers's 1986 paper: An O(ND) Difference Algorithm and Its Variations. +func (dmp *DiffMatchPatch) diffBisect(runes1, runes2 []rune, deadline time.Time) []Diff { + // Cache the text lengths to prevent multiple calls. + runes1Len, runes2Len := len(runes1), len(runes2) + + maxD := (runes1Len + runes2Len + 1) / 2 + vOffset := maxD + vLength := 2 * maxD + + v1 := make([]int, vLength) + v2 := make([]int, vLength) + for i := range v1 { + v1[i] = -1 + v2[i] = -1 + } + v1[vOffset+1] = 0 + v2[vOffset+1] = 0 + + delta := runes1Len - runes2Len + // If the total number of characters is odd, then the front path will collide with the reverse path. + front := (delta%2 != 0) + // Offsets for start and end of k loop. Prevents mapping of space beyond the grid. + k1start := 0 + k1end := 0 + k2start := 0 + k2end := 0 + for d := 0; d < maxD; d++ { + // Bail out if deadline is reached. + if !deadline.IsZero() && time.Now().After(deadline) { + break + } + + // Walk the front path one step. + for k1 := -d + k1start; k1 <= d-k1end; k1 += 2 { + k1Offset := vOffset + k1 + var x1 int + + if k1 == -d || (k1 != d && v1[k1Offset-1] < v1[k1Offset+1]) { + x1 = v1[k1Offset+1] + } else { + x1 = v1[k1Offset-1] + 1 + } + + y1 := x1 - k1 + for x1 < runes1Len && y1 < runes2Len { + if runes1[x1] != runes2[y1] { + break + } + x1++ + y1++ + } + v1[k1Offset] = x1 + if x1 > runes1Len { + // Ran off the right of the graph. + k1end += 2 + } else if y1 > runes2Len { + // Ran off the bottom of the graph. + k1start += 2 + } else if front { + k2Offset := vOffset + delta - k1 + if k2Offset >= 0 && k2Offset < vLength && v2[k2Offset] != -1 { + // Mirror x2 onto top-left coordinate system. + x2 := runes1Len - v2[k2Offset] + if x1 >= x2 { + // Overlap detected. + return dmp.diffBisectSplit(runes1, runes2, x1, y1, deadline) + } + } + } + } + // Walk the reverse path one step. + for k2 := -d + k2start; k2 <= d-k2end; k2 += 2 { + k2Offset := vOffset + k2 + var x2 int + if k2 == -d || (k2 != d && v2[k2Offset-1] < v2[k2Offset+1]) { + x2 = v2[k2Offset+1] + } else { + x2 = v2[k2Offset-1] + 1 + } + var y2 = x2 - k2 + for x2 < runes1Len && y2 < runes2Len { + if runes1[runes1Len-x2-1] != runes2[runes2Len-y2-1] { + break + } + x2++ + y2++ + } + v2[k2Offset] = x2 + if x2 > runes1Len { + // Ran off the left of the graph. + k2end += 2 + } else if y2 > runes2Len { + // Ran off the top of the graph. + k2start += 2 + } else if !front { + k1Offset := vOffset + delta - k2 + if k1Offset >= 0 && k1Offset < vLength && v1[k1Offset] != -1 { + x1 := v1[k1Offset] + y1 := vOffset + x1 - k1Offset + // Mirror x2 onto top-left coordinate system. + x2 = runes1Len - x2 + if x1 >= x2 { + // Overlap detected. + return dmp.diffBisectSplit(runes1, runes2, x1, y1, deadline) + } + } + } + } + } + // Diff took too long and hit the deadline or number of diffs equals number of characters, no commonality at all. + return []Diff{ + Diff{DiffDelete, string(runes1)}, + Diff{DiffInsert, string(runes2)}, + } +} + +func (dmp *DiffMatchPatch) diffBisectSplit(runes1, runes2 []rune, x, y int, + deadline time.Time) []Diff { + runes1a := runes1[:x] + runes2a := runes2[:y] + runes1b := runes1[x:] + runes2b := runes2[y:] + + // Compute both diffs serially. 
+	diffs := dmp.diffMainRunes(runes1a, runes2a, false, deadline)
+	diffsb := dmp.diffMainRunes(runes1b, runes2b, false, deadline)
+
+	return append(diffs, diffsb...)
+}
+
+// DiffLinesToChars splits two texts into a list of strings, and reduces the texts to a string of hashes where each Unicode character represents one line.
+// It's slightly faster to call DiffLinesToRunes first, followed by DiffMainRunes.
+func (dmp *DiffMatchPatch) DiffLinesToChars(text1, text2 string) (string, string, []string) {
+	chars1, chars2, lineArray := dmp.DiffLinesToRunes(text1, text2)
+	return string(chars1), string(chars2), lineArray
+}
+
+// DiffLinesToRunes splits two texts into a list of runes. Each rune represents one line.
+func (dmp *DiffMatchPatch) DiffLinesToRunes(text1, text2 string) ([]rune, []rune, []string) {
+	// '\x00' is a valid character, but various debuggers don't like it. So we'll insert a junk entry to avoid generating a null character.
+	lineArray := []string{""}    // e.g. lineArray[4] == 'Hello\n'
+	lineHash := map[string]int{} // e.g. lineHash['Hello\n'] == 4
+
+	chars1 := dmp.diffLinesToRunesMunge(text1, &lineArray, lineHash)
+	chars2 := dmp.diffLinesToRunesMunge(text2, &lineArray, lineHash)
+
+	return chars1, chars2, lineArray
+}
+
+func (dmp *DiffMatchPatch) diffLinesToRunes(text1, text2 []rune) ([]rune, []rune, []string) {
+	return dmp.DiffLinesToRunes(string(text1), string(text2))
+}
+
+// diffLinesToRunesMunge splits a text into an array of strings, and reduces the texts to a []rune where each Unicode character represents one line.
+// We use strings instead of []runes as input mainly because you can't use []rune as a map key.
+func (dmp *DiffMatchPatch) diffLinesToRunesMunge(text string, lineArray *[]string, lineHash map[string]int) []rune {
+	// Walk the text, pulling out a substring for each line. text.split('\n') would temporarily double our memory footprint. Modifying text would create many large strings to garbage collect.
+	lineStart := 0
+	lineEnd := -1
+	runes := []rune{}
+
+	for lineEnd < len(text)-1 {
+		lineEnd = indexOf(text, "\n", lineStart)
+
+		if lineEnd == -1 {
+			lineEnd = len(text) - 1
+		}
+
+		line := text[lineStart : lineEnd+1]
+		lineStart = lineEnd + 1
+		lineValue, ok := lineHash[line]
+
+		if ok {
+			runes = append(runes, rune(lineValue))
+		} else {
+			*lineArray = append(*lineArray, line)
+			lineHash[line] = len(*lineArray) - 1
+			runes = append(runes, rune(len(*lineArray)-1))
+		}
+	}
+
+	return runes
+}
+
+// DiffCharsToLines rehydrates the text in a diff from a string of line hashes to real lines of text.
+func (dmp *DiffMatchPatch) DiffCharsToLines(diffs []Diff, lineArray []string) []Diff {
+	hydrated := make([]Diff, 0, len(diffs))
+	for _, aDiff := range diffs {
+		chars := aDiff.Text
+		text := make([]string, len(chars))
+
+		for i, r := range chars {
+			text[i] = lineArray[r]
+		}
+
+		aDiff.Text = strings.Join(text, "")
+		hydrated = append(hydrated, aDiff)
+	}
+	return hydrated
+}
+
+// DiffCommonPrefix determines the common prefix length of two strings.
+func (dmp *DiffMatchPatch) DiffCommonPrefix(text1, text2 string) int {
+	// Unused in this code, but retained for interface compatibility.
+	return commonPrefixLength([]rune(text1), []rune(text2))
+}
+
+// DiffCommonSuffix determines the common suffix length of two strings.
+func (dmp *DiffMatchPatch) DiffCommonSuffix(text1, text2 string) int {
+	// Unused in this code, but retained for interface compatibility.
+ return commonSuffixLength([]rune(text1), []rune(text2)) +} + +// commonPrefixLength returns the length of the common prefix of two rune slices. +func commonPrefixLength(text1, text2 []rune) int { + short, long := text1, text2 + if len(short) > len(long) { + short, long = long, short + } + for i, r := range short { + if r != long[i] { + return i + } + } + return len(short) +} + +// commonSuffixLength returns the length of the common suffix of two rune slices. +func commonSuffixLength(text1, text2 []rune) int { + n := min(len(text1), len(text2)) + for i := 0; i < n; i++ { + if text1[len(text1)-i-1] != text2[len(text2)-i-1] { + return i + } + } + return n + + // TODO research and benchmark this, why is it not activated? https://github.com/sergi/go-diff/issues/54 + // Binary search. + // Performance analysis: http://neil.fraser.name/news/2007/10/09/ + /* + pointermin := 0 + pointermax := math.Min(len(text1), len(text2)) + pointermid := pointermax + pointerend := 0 + for pointermin < pointermid { + if text1[len(text1)-pointermid:len(text1)-pointerend] == + text2[len(text2)-pointermid:len(text2)-pointerend] { + pointermin = pointermid + pointerend = pointermin + } else { + pointermax = pointermid + } + pointermid = math.Floor((pointermax-pointermin)/2 + pointermin) + } + return pointermid + */ +} + +// DiffCommonOverlap determines if the suffix of one string is the prefix of another. +func (dmp *DiffMatchPatch) DiffCommonOverlap(text1 string, text2 string) int { + // Cache the text lengths to prevent multiple calls. + text1Length := len(text1) + text2Length := len(text2) + // Eliminate the null case. + if text1Length == 0 || text2Length == 0 { + return 0 + } + // Truncate the longer string. + if text1Length > text2Length { + text1 = text1[text1Length-text2Length:] + } else if text1Length < text2Length { + text2 = text2[0:text1Length] + } + textLength := int(math.Min(float64(text1Length), float64(text2Length))) + // Quick check for the worst case. + if text1 == text2 { + return textLength + } + + // Start by looking for a single character match and increase length until no match is found. Performance analysis: http://neil.fraser.name/news/2010/11/04/ + best := 0 + length := 1 + for { + pattern := text1[textLength-length:] + found := strings.Index(text2, pattern) + if found == -1 { + break + } + length += found + if found == 0 || text1[textLength-length:] == text2[0:length] { + best = length + length++ + } + } + + return best +} + +// DiffHalfMatch checks whether the two texts share a substring which is at least half the length of the longer text. This speedup can produce non-minimal diffs. +func (dmp *DiffMatchPatch) DiffHalfMatch(text1, text2 string) []string { + // Unused in this code, but retained for interface compatibility. + runeSlices := dmp.diffHalfMatch([]rune(text1), []rune(text2)) + if runeSlices == nil { + return nil + } + + result := make([]string, len(runeSlices)) + for i, r := range runeSlices { + result[i] = string(r) + } + return result +} + +func (dmp *DiffMatchPatch) diffHalfMatch(text1, text2 []rune) [][]rune { + if dmp.DiffTimeout <= 0 { + // Don't risk returning a non-optimal diff if we have unlimited time. + return nil + } + + var longtext, shorttext []rune + if len(text1) > len(text2) { + longtext = text1 + shorttext = text2 + } else { + longtext = text2 + shorttext = text1 + } + + if len(longtext) < 4 || len(shorttext)*2 < len(longtext) { + return nil // Pointless. + } + + // First check if the second quarter is the seed for a half-match. 
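+	// (Roughly, any common substring at least half the length of the longer
+	// text must cover its second or third quarter, so these two seeds suffice.)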
+	hm1 := dmp.diffHalfMatchI(longtext, shorttext, int(float64(len(longtext)+3)/4))
+
+	// Check again based on the third quarter.
+	hm2 := dmp.diffHalfMatchI(longtext, shorttext, int(float64(len(longtext)+1)/2))
+
+	hm := [][]rune{}
+	if hm1 == nil && hm2 == nil {
+		return nil
+	} else if hm2 == nil {
+		hm = hm1
+	} else if hm1 == nil {
+		hm = hm2
+	} else {
+		// Both matched. Select the longest.
+		if len(hm1[4]) > len(hm2[4]) {
+			hm = hm1
+		} else {
+			hm = hm2
+		}
+	}
+
+	// A half-match was found, sort out the return data.
+	if len(text1) > len(text2) {
+		return hm
+	}
+
+	return [][]rune{hm[2], hm[3], hm[0], hm[1], hm[4]}
+}
+
+// diffHalfMatchI checks whether a substring of shorttext exists within longtext such that the substring is at least half the length of longtext.
+// Returns a slice containing the prefix of longtext, the suffix of longtext, the prefix of shorttext, the suffix of shorttext and the common middle, or nil if there was no match.
+func (dmp *DiffMatchPatch) diffHalfMatchI(l, s []rune, i int) [][]rune {
+	var bestCommonA []rune
+	var bestCommonB []rune
+	var bestCommonLen int
+	var bestLongtextA []rune
+	var bestLongtextB []rune
+	var bestShorttextA []rune
+	var bestShorttextB []rune
+
+	// Start with a 1/4 length substring at position i as a seed.
+	seed := l[i : i+len(l)/4]
+
+	for j := runesIndexOf(s, seed, 0); j != -1; j = runesIndexOf(s, seed, j+1) {
+		prefixLength := commonPrefixLength(l[i:], s[j:])
+		suffixLength := commonSuffixLength(l[:i], s[:j])
+
+		if bestCommonLen < suffixLength+prefixLength {
+			bestCommonA = s[j-suffixLength : j]
+			bestCommonB = s[j : j+prefixLength]
+			bestCommonLen = len(bestCommonA) + len(bestCommonB)
+			bestLongtextA = l[:i-suffixLength]
+			bestLongtextB = l[i+prefixLength:]
+			bestShorttextA = s[:j-suffixLength]
+			bestShorttextB = s[j+prefixLength:]
+		}
+	}
+
+	if bestCommonLen*2 < len(l) {
+		return nil
+	}
+
+	return [][]rune{
+		bestLongtextA,
+		bestLongtextB,
+		bestShorttextA,
+		bestShorttextB,
+		append(bestCommonA, bestCommonB...),
+	}
+}
+
+// DiffCleanupSemantic reduces the number of edits by eliminating semantically trivial equalities.
+func (dmp *DiffMatchPatch) DiffCleanupSemantic(diffs []Diff) []Diff {
+	changes := false
+	// Stack of indices where equalities are found.
+	type equality struct {
+		data int
+		next *equality
+	}
+	var equalities *equality
+
+	var lastequality string
+	// Always equal to diffs[equalities[equalitiesLength - 1]][1]
+	var pointer int // Index of current position.
+	// Number of characters that changed prior to the equality.
+	var lengthInsertions1, lengthDeletions1 int
+	// Number of characters that changed after the equality.
+	var lengthInsertions2, lengthDeletions2 int
+
+	for pointer < len(diffs) {
+		if diffs[pointer].Type == DiffEqual {
+			// Equality found.
+
+			equalities = &equality{
+				data: pointer,
+				next: equalities,
+			}
+			lengthInsertions1 = lengthInsertions2
+			lengthDeletions1 = lengthDeletions2
+			lengthInsertions2 = 0
+			lengthDeletions2 = 0
+			lastequality = diffs[pointer].Text
+		} else {
+			// An insertion or deletion.
+
+			if diffs[pointer].Type == DiffInsert {
+				lengthInsertions2 += len(diffs[pointer].Text)
+			} else {
+				lengthDeletions2 += len(diffs[pointer].Text)
+			}
+			// Eliminate an equality that is smaller or equal to the edits on both sides of it.
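+			// ("Edits" here means the larger of the insertion and deletion
+			// totals accumulated on that side of the equality.)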
+
+// DiffCleanupSemantic reduces the number of edits by eliminating semantically trivial equalities.
+func (dmp *DiffMatchPatch) DiffCleanupSemantic(diffs []Diff) []Diff {
+	changes := false
+	// Stack of indices where equalities are found.
+	type equality struct {
+		data int
+		next *equality
+	}
+	var equalities *equality
+
+	var lastequality string
+	// Always equal to diffs[equalities[equalitiesLength - 1]][1]
+	var pointer int // Index of current position.
+	// Number of characters that changed prior to the equality.
+	var lengthInsertions1, lengthDeletions1 int
+	// Number of characters that changed after the equality.
+	var lengthInsertions2, lengthDeletions2 int
+
+	for pointer < len(diffs) {
+		if diffs[pointer].Type == DiffEqual {
+			// Equality found.
+			equalities = &equality{
+				data: pointer,
+				next: equalities,
+			}
+			lengthInsertions1 = lengthInsertions2
+			lengthDeletions1 = lengthDeletions2
+			lengthInsertions2 = 0
+			lengthDeletions2 = 0
+			lastequality = diffs[pointer].Text
+		} else {
+			// An insertion or deletion.
+			if diffs[pointer].Type == DiffInsert {
+				lengthInsertions2 += len(diffs[pointer].Text)
+			} else {
+				lengthDeletions2 += len(diffs[pointer].Text)
+			}
+			// Eliminate an equality that is smaller or equal to the edits on both sides of it.
+			difference1 := int(math.Max(float64(lengthInsertions1), float64(lengthDeletions1)))
+			difference2 := int(math.Max(float64(lengthInsertions2), float64(lengthDeletions2)))
+			if len(lastequality) > 0 &&
+				(len(lastequality) <= difference1) &&
+				(len(lastequality) <= difference2) {
+				// Duplicate record.
+				insPoint := equalities.data
+				diffs = append(
+					diffs[:insPoint],
+					append([]Diff{Diff{DiffDelete, lastequality}}, diffs[insPoint:]...)...)
+
+				// Change second copy to insert.
+				diffs[insPoint+1].Type = DiffInsert
+				// Throw away the equality we just deleted.
+				equalities = equalities.next
+
+				if equalities != nil {
+					equalities = equalities.next
+				}
+				if equalities != nil {
+					pointer = equalities.data
+				} else {
+					pointer = -1
+				}
+
+				lengthInsertions1 = 0 // Reset the counters.
+				lengthDeletions1 = 0
+				lengthInsertions2 = 0
+				lengthDeletions2 = 0
+				lastequality = ""
+				changes = true
+			}
+		}
+		pointer++
+	}
+
+	// Normalize the diff.
+	if changes {
+		diffs = dmp.DiffCleanupMerge(diffs)
+	}
+	diffs = dmp.DiffCleanupSemanticLossless(diffs)
+	// Find any overlaps between deletions and insertions.
+	// e.g: <del>abcxxx</del><ins>xxxdef</ins>
+	//   -> <del>abc</del>xxx<ins>def</ins>
+	// e.g: <del>xxxabc</del><ins>defxxx</ins>
+	//   -> <ins>def</ins>xxx<del>abc</del>
+	// Only extract an overlap if it is as big as the edit ahead or behind it.
+	pointer = 1
+	for pointer < len(diffs) {
+		if diffs[pointer-1].Type == DiffDelete &&
+			diffs[pointer].Type == DiffInsert {
+			deletion := diffs[pointer-1].Text
+			insertion := diffs[pointer].Text
+			overlapLength1 := dmp.DiffCommonOverlap(deletion, insertion)
+			overlapLength2 := dmp.DiffCommonOverlap(insertion, deletion)
+			if overlapLength1 >= overlapLength2 {
+				if float64(overlapLength1) >= float64(len(deletion))/2 ||
+					float64(overlapLength1) >= float64(len(insertion))/2 {
+
+					// Overlap found. Insert an equality and trim the surrounding edits.
+					diffs = append(
+						diffs[:pointer],
+						append([]Diff{Diff{DiffEqual, insertion[:overlapLength1]}}, diffs[pointer:]...)...)
+
+					diffs[pointer-1].Text =
+						deletion[0 : len(deletion)-overlapLength1]
+					diffs[pointer+1].Text = insertion[overlapLength1:]
+					pointer++
+				}
+			} else {
+				if float64(overlapLength2) >= float64(len(deletion))/2 ||
+					float64(overlapLength2) >= float64(len(insertion))/2 {
+					// Reverse overlap found. Insert an equality and swap and trim the surrounding edits.
+					overlap := Diff{DiffEqual, deletion[:overlapLength2]}
+					diffs = append(
+						diffs[:pointer],
+						append([]Diff{overlap}, diffs[pointer:]...)...)
+
+					diffs[pointer-1].Type = DiffInsert
+					diffs[pointer-1].Text = insertion[0 : len(insertion)-overlapLength2]
+					diffs[pointer+1].Type = DiffDelete
+					diffs[pointer+1].Text = deletion[overlapLength2:]
+					pointer++
+				}
+			}
+			pointer++
+		}
+		pointer++
+	}
+
+	return diffs
+}
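A sketch of what the overlap scan and the semantic cleanup above do, using only the exported API (illustrative, not part of the diff; assumes the canonical import path):

package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()

	// DiffCommonOverlap: the two-character suffix "sh" of "fish" is also the
	// prefix of "shark".
	fmt.Println(dmp.DiffCommonOverlap("fish", "shark")) // 2

	// DiffCleanupSemantic: the one-character equality "b" costs more to keep
	// than the edits around it, so it is folded into them:
	// [delete "a", equal "b", delete "c"] -> [delete "abc", insert "b"]
	diffs := []diffmatchpatch.Diff{
		{Type: diffmatchpatch.DiffDelete, Text: "a"},
		{Type: diffmatchpatch.DiffEqual, Text: "b"},
		{Type: diffmatchpatch.DiffDelete, Text: "c"},
	}
	fmt.Println(dmp.DiffCleanupSemantic(diffs))
}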
+
+// Define some regex patterns for matching boundaries.
+var (
+	nonAlphaNumericRegex = regexp.MustCompile(`[^a-zA-Z0-9]`)
+	whitespaceRegex      = regexp.MustCompile(`\s`)
+	linebreakRegex       = regexp.MustCompile(`[\r\n]`)
+	blanklineEndRegex    = regexp.MustCompile(`\n\r?\n$`)
+	blanklineStartRegex  = regexp.MustCompile(`^\r?\n\r?\n`)
+)
+
+// diffCleanupSemanticScore computes a score representing whether the internal boundary falls on logical boundaries.
+// Scores range from 6 (best) to 0 (worst). Closure, but does not reference any external variables.
+func diffCleanupSemanticScore(one, two string) int {
+	if len(one) == 0 || len(two) == 0 {
+		// Edges are the best.
+		return 6
+	}
+
+	// Each port of this function behaves slightly differently due to subtle
+	// differences in each language's definition of things like 'whitespace'.
+	// Since this function's purpose is largely cosmetic, the choice has been
+	// made to use each language's native features rather than force total
+	// conformity.
+	rune1, _ := utf8.DecodeLastRuneInString(one)
+	rune2, _ := utf8.DecodeRuneInString(two)
+	char1 := string(rune1)
+	char2 := string(rune2)
+
+	nonAlphaNumeric1 := nonAlphaNumericRegex.MatchString(char1)
+	nonAlphaNumeric2 := nonAlphaNumericRegex.MatchString(char2)
+	whitespace1 := nonAlphaNumeric1 && whitespaceRegex.MatchString(char1)
+	whitespace2 := nonAlphaNumeric2 && whitespaceRegex.MatchString(char2)
+	lineBreak1 := whitespace1 && linebreakRegex.MatchString(char1)
+	lineBreak2 := whitespace2 && linebreakRegex.MatchString(char2)
+	blankLine1 := lineBreak1 && blanklineEndRegex.MatchString(one)
+	blankLine2 := lineBreak2 && blanklineStartRegex.MatchString(two)
+
+	if blankLine1 || blankLine2 {
+		// Five points for blank lines.
+		return 5
+	} else if lineBreak1 || lineBreak2 {
+		// Four points for line breaks.
+		return 4
+	} else if nonAlphaNumeric1 && !whitespace1 && whitespace2 {
+		// Three points for end of sentences.
+		return 3
+	} else if whitespace1 || whitespace2 {
+		// Two points for whitespace.
+		return 2
+	} else if nonAlphaNumeric1 || nonAlphaNumeric2 {
+		// One point for non-alphanumeric.
+		return 1
+	}
+	return 0
+}
+
+// DiffCleanupSemanticLossless looks for single edits surrounded on both sides by equalities which can be shifted sideways to align the edit to a word boundary.
+// E.g: The c<ins>at c</ins>ame. -> The <ins>cat </ins>came.
+func (dmp *DiffMatchPatch) DiffCleanupSemanticLossless(diffs []Diff) []Diff {
+	pointer := 1
+
+	// Intentionally ignore the first and last element (don't need checking).
+	for pointer < len(diffs)-1 {
+		if diffs[pointer-1].Type == DiffEqual &&
+			diffs[pointer+1].Type == DiffEqual {
+
+			// This is a single edit surrounded by equalities.
+			equality1 := diffs[pointer-1].Text
+			edit := diffs[pointer].Text
+			equality2 := diffs[pointer+1].Text
+
+			// First, shift the edit as far left as possible.
+			commonOffset := dmp.DiffCommonSuffix(equality1, edit)
+			if commonOffset > 0 {
+				commonString := edit[len(edit)-commonOffset:]
+				equality1 = equality1[0 : len(equality1)-commonOffset]
+				edit = commonString + edit[:len(edit)-commonOffset]
+				equality2 = commonString + equality2
+			}
+
+			// Second, step character by character right, looking for the best fit.
+			bestEquality1 := equality1
+			bestEdit := edit
+			bestEquality2 := equality2
+			bestScore := diffCleanupSemanticScore(equality1, edit) +
+				diffCleanupSemanticScore(edit, equality2)
+
+			for len(edit) != 0 && len(equality2) != 0 {
+				_, sz := utf8.DecodeRuneInString(edit)
+				if len(equality2) < sz || edit[:sz] != equality2[:sz] {
+					break
+				}
+				equality1 += edit[:sz]
+				edit = edit[sz:] + equality2[:sz]
+				equality2 = equality2[sz:]
+				score := diffCleanupSemanticScore(equality1, edit) +
+					diffCleanupSemanticScore(edit, equality2)
+				// The >= encourages trailing rather than leading whitespace on edits.
+				if score >= bestScore {
+					bestScore = score
+					bestEquality1 = equality1
+					bestEdit = edit
+					bestEquality2 = equality2
+				}
+			}
+
+			if diffs[pointer-1].Text != bestEquality1 {
+				// We have an improvement, save it back to the diff.
+				if len(bestEquality1) != 0 {
+					diffs[pointer-1].Text = bestEquality1
+				} else {
+					diffs = splice(diffs, pointer-1, 1)
+					pointer--
+				}
+
+				diffs[pointer].Text = bestEdit
+				if len(bestEquality2) != 0 {
+					diffs[pointer+1].Text = bestEquality2
+				} else {
+					diffs = append(diffs[:pointer+1], diffs[pointer+2:]...)
+					pointer--
+				}
+			}
+		}
+		pointer++
+	}
+
+	return diffs
+}
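A minimal sketch of the word-boundary shift above (illustrative, not part of the diff):

package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()
	// The insertion "at c" slides right until it lands on a word boundary:
	// [equal "The c", insert "at c", equal "ame."]
	//   -> [equal "The ", insert "cat ", equal "came."]
	diffs := []diffmatchpatch.Diff{
		{Type: diffmatchpatch.DiffEqual, Text: "The c"},
		{Type: diffmatchpatch.DiffInsert, Text: "at c"},
		{Type: diffmatchpatch.DiffEqual, Text: "ame."},
	}
	fmt.Println(dmp.DiffCleanupSemanticLossless(diffs))
}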
+
+// DiffCleanupEfficiency reduces the number of edits by eliminating operationally trivial equalities.
+func (dmp *DiffMatchPatch) DiffCleanupEfficiency(diffs []Diff) []Diff {
+	changes := false
+	// Stack of indices where equalities are found.
+	type equality struct {
+		data int
+		next *equality
+	}
+	var equalities *equality
+	// Always equal to equalities[equalitiesLength-1][1]
+	lastequality := ""
+	pointer := 0 // Index of current position.
+	// Is there an insertion operation before the last equality.
+	preIns := false
+	// Is there a deletion operation before the last equality.
+	preDel := false
+	// Is there an insertion operation after the last equality.
+	postIns := false
+	// Is there a deletion operation after the last equality.
+	postDel := false
+	for pointer < len(diffs) {
+		if diffs[pointer].Type == DiffEqual { // Equality found.
+			if len(diffs[pointer].Text) < dmp.DiffEditCost &&
+				(postIns || postDel) {
+				// Candidate found.
+				equalities = &equality{
+					data: pointer,
+					next: equalities,
+				}
+				preIns = postIns
+				preDel = postDel
+				lastequality = diffs[pointer].Text
+			} else {
+				// Not a candidate, and can never become one.
+				equalities = nil
+				lastequality = ""
+			}
+			postIns = false
+			postDel = false
+		} else { // An insertion or deletion.
+			if diffs[pointer].Type == DiffDelete {
+				postDel = true
+			} else {
+				postIns = true
+			}
+
+			// Five types to be split:
+			// <ins>A</ins><del>B</del>XY<ins>C</ins><del>D</del>
+			// <ins>A</ins>X<ins>C</ins><del>D</del>
+			// <ins>A</ins><del>B</del>X<ins>C</ins>
+			// <ins>A</ins>X<ins>C</ins><del>D</del>
+			// <ins>A</ins><del>B</del>X<del>C</del>
+			var sumPres int
+			if preIns {
+				sumPres++
+			}
+			if preDel {
+				sumPres++
+			}
+			if postIns {
+				sumPres++
+			}
+			if postDel {
+				sumPres++
+			}
+			if len(lastequality) > 0 &&
+				((preIns && preDel && postIns && postDel) ||
+					((len(lastequality) < dmp.DiffEditCost/2) && sumPres == 3)) {
+
+				insPoint := equalities.data
+
+				// Duplicate record.
+				diffs = append(diffs[:insPoint],
+					append([]Diff{Diff{DiffDelete, lastequality}}, diffs[insPoint:]...)...)
+
+				// Change second copy to insert.
+				diffs[insPoint+1].Type = DiffInsert
+				// Throw away the equality we just deleted.
+				equalities = equalities.next
+				lastequality = ""
+
+				if preIns && preDel {
+					// No changes made which could affect previous entry, keep going.
+					postIns = true
+					postDel = true
+					equalities = nil
+				} else {
+					if equalities != nil {
+						equalities = equalities.next
+					}
+					if equalities != nil {
+						pointer = equalities.data
+					} else {
+						pointer = -1
+					}
+					postIns = false
+					postDel = false
+				}
+				changes = true
+			}
+		}
+		pointer++
+	}
+
+	if changes {
+		diffs = dmp.DiffCleanupMerge(diffs)
+	}
+
+	return diffs
+}
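A sketch of the four-edit elimination above, with the default DiffEditCost of 4 (illustrative, not part of the diff):

package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New() // DiffEditCost defaults to 4
	// "xyz" (3 chars) is shorter than DiffEditCost and is hemmed in by edits
	// on all four sides, so it is cheaper to fold it into the edits:
	// -> [delete "abxyz34", insert "12xyz56"]
	diffs := []diffmatchpatch.Diff{
		{Type: diffmatchpatch.DiffDelete, Text: "ab"},
		{Type: diffmatchpatch.DiffInsert, Text: "12"},
		{Type: diffmatchpatch.DiffEqual, Text: "xyz"},
		{Type: diffmatchpatch.DiffDelete, Text: "34"},
		{Type: diffmatchpatch.DiffInsert, Text: "56"},
	}
	fmt.Println(dmp.DiffCleanupEfficiency(diffs))
}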
+
+// DiffCleanupMerge reorders and merges like edit sections. Merge equalities.
+// Any edit section can move as long as it doesn't cross an equality.
+func (dmp *DiffMatchPatch) DiffCleanupMerge(diffs []Diff) []Diff {
+	// Add a dummy entry at the end.
+	diffs = append(diffs, Diff{DiffEqual, ""})
+	pointer := 0
+	countDelete := 0
+	countInsert := 0
+	commonlength := 0
+	textDelete := []rune(nil)
+	textInsert := []rune(nil)
+
+	for pointer < len(diffs) {
+		switch diffs[pointer].Type {
+		case DiffInsert:
+			countInsert++
+			textInsert = append(textInsert, []rune(diffs[pointer].Text)...)
+			pointer++
+		case DiffDelete:
+			countDelete++
+			textDelete = append(textDelete, []rune(diffs[pointer].Text)...)
+			pointer++
+		case DiffEqual:
+			// Upon reaching an equality, check for prior redundancies.
+			if countDelete+countInsert > 1 {
+				if countDelete != 0 && countInsert != 0 {
+					// Factor out any common prefixes.
+					commonlength = commonPrefixLength(textInsert, textDelete)
+					if commonlength != 0 {
+						x := pointer - countDelete - countInsert
+						if x > 0 && diffs[x-1].Type == DiffEqual {
+							diffs[x-1].Text += string(textInsert[:commonlength])
+						} else {
+							diffs = append([]Diff{Diff{DiffEqual, string(textInsert[:commonlength])}}, diffs...)
+							pointer++
+						}
+						textInsert = textInsert[commonlength:]
+						textDelete = textDelete[commonlength:]
+					}
+					// Factor out any common suffixes.
+					commonlength = commonSuffixLength(textInsert, textDelete)
+					if commonlength != 0 {
+						insertIndex := len(textInsert) - commonlength
+						deleteIndex := len(textDelete) - commonlength
+						diffs[pointer].Text = string(textInsert[insertIndex:]) + diffs[pointer].Text
+						textInsert = textInsert[:insertIndex]
+						textDelete = textDelete[:deleteIndex]
+					}
+				}
+				// Delete the offending records and add the merged ones.
+				if countDelete == 0 {
+					diffs = splice(diffs, pointer-countInsert,
+						countDelete+countInsert,
+						Diff{DiffInsert, string(textInsert)})
+				} else if countInsert == 0 {
+					diffs = splice(diffs, pointer-countDelete,
+						countDelete+countInsert,
+						Diff{DiffDelete, string(textDelete)})
+				} else {
+					diffs = splice(diffs, pointer-countDelete-countInsert,
+						countDelete+countInsert,
+						Diff{DiffDelete, string(textDelete)},
+						Diff{DiffInsert, string(textInsert)})
+				}
+
+				pointer = pointer - countDelete - countInsert + 1
+				if countDelete != 0 {
+					pointer++
+				}
+				if countInsert != 0 {
+					pointer++
+				}
+			} else if pointer != 0 && diffs[pointer-1].Type == DiffEqual {
+				// Merge this equality with the previous one.
+				diffs[pointer-1].Text += diffs[pointer].Text
+				diffs = append(diffs[:pointer], diffs[pointer+1:]...)
+			} else {
+				pointer++
+			}
+			countInsert = 0
+			countDelete = 0
+			textDelete = nil
+			textInsert = nil
+		}
+	}
+
+	if len(diffs[len(diffs)-1].Text) == 0 {
+		diffs = diffs[0 : len(diffs)-1] // Remove the dummy entry at the end.
+	}
+
+	// Second pass: look for single edits surrounded on both sides by equalities which can be shifted sideways to eliminate an equality. E.g: A<ins>BA</ins>C -> <ins>AB</ins>AC
+	changes := false
+	pointer = 1
+	// Intentionally ignore the first and last element (don't need checking).
+	for pointer < (len(diffs) - 1) {
+		if diffs[pointer-1].Type == DiffEqual &&
+			diffs[pointer+1].Type == DiffEqual {
+			// This is a single edit surrounded by equalities.
+			if strings.HasSuffix(diffs[pointer].Text, diffs[pointer-1].Text) {
+				// Shift the edit over the previous equality.
+				diffs[pointer].Text = diffs[pointer-1].Text +
+					diffs[pointer].Text[:len(diffs[pointer].Text)-len(diffs[pointer-1].Text)]
+				diffs[pointer+1].Text = diffs[pointer-1].Text + diffs[pointer+1].Text
+				diffs = splice(diffs, pointer-1, 1)
+				changes = true
+			} else if strings.HasPrefix(diffs[pointer].Text, diffs[pointer+1].Text) {
+				// Shift the edit over the next equality.
+				diffs[pointer-1].Text += diffs[pointer+1].Text
+				diffs[pointer].Text =
+					diffs[pointer].Text[len(diffs[pointer+1].Text):] + diffs[pointer+1].Text
+				diffs = splice(diffs, pointer+1, 1)
+				changes = true
+			}
+		}
+		pointer++
+	}
+
+	// If shifts were made, the diff needs reordering and another shift sweep.
+	if changes {
+		diffs = dmp.DiffCleanupMerge(diffs)
+	}
+
+	return diffs
+}
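A sketch of the merge pass above (illustrative, not part of the diff):

package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()
	// Interleaved like-typed runs merge and reorder:
	// -> [delete "ac", insert "bd", equal "ef"]
	diffs := []diffmatchpatch.Diff{
		{Type: diffmatchpatch.DiffDelete, Text: "a"},
		{Type: diffmatchpatch.DiffInsert, Text: "b"},
		{Type: diffmatchpatch.DiffDelete, Text: "c"},
		{Type: diffmatchpatch.DiffInsert, Text: "d"},
		{Type: diffmatchpatch.DiffEqual, Text: "e"},
		{Type: diffmatchpatch.DiffEqual, Text: "f"},
	}
	fmt.Println(dmp.DiffCleanupMerge(diffs))
}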
+
+// DiffXIndex returns the equivalent location in s2.
+func (dmp *DiffMatchPatch) DiffXIndex(diffs []Diff, loc int) int {
+	chars1 := 0
+	chars2 := 0
+	lastChars1 := 0
+	lastChars2 := 0
+	lastDiff := Diff{}
+	for i := 0; i < len(diffs); i++ {
+		aDiff := diffs[i]
+		if aDiff.Type != DiffInsert {
+			// Equality or deletion.
+			chars1 += len(aDiff.Text)
+		}
+		if aDiff.Type != DiffDelete {
+			// Equality or insertion.
+			chars2 += len(aDiff.Text)
+		}
+		if chars1 > loc {
+			// Overshot the location.
+			lastDiff = aDiff
+			break
+		}
+		lastChars1 = chars1
+		lastChars2 = chars2
+	}
+	if lastDiff.Type == DiffDelete {
+		// The location was deleted.
+		return lastChars2
+	}
+	// Add the remaining character length.
+	return lastChars2 + (loc - lastChars1)
+}
+
+// DiffPrettyHtml converts a []Diff into a pretty HTML report.
+// It is intended as an example from which to write one's own display functions.
+func (dmp *DiffMatchPatch) DiffPrettyHtml(diffs []Diff) string {
+	var buff bytes.Buffer
+	for _, diff := range diffs {
+		text := strings.Replace(html.EscapeString(diff.Text), "\n", "&para;<br>", -1)
+		switch diff.Type {
+		case DiffInsert:
+			_, _ = buff.WriteString("<ins style=\"background:#e6ffe6;\">")
+			_, _ = buff.WriteString(text)
+			_, _ = buff.WriteString("</ins>")
+		case DiffDelete:
+			_, _ = buff.WriteString("<del style=\"background:#ffe6e6;\">")
+			_, _ = buff.WriteString(text)
+			_, _ = buff.WriteString("</del>")
+		case DiffEqual:
+			_, _ = buff.WriteString("<span>")
+			_, _ = buff.WriteString(text)
+			_, _ = buff.WriteString("</span>")
+		}
+	}
+	return buff.String()
+}
+
+// DiffPrettyText converts a []Diff into a colored text report.
+func (dmp *DiffMatchPatch) DiffPrettyText(diffs []Diff) string {
+	var buff bytes.Buffer
+	for _, diff := range diffs {
+		text := diff.Text
+
+		switch diff.Type {
+		case DiffInsert:
+			_, _ = buff.WriteString("\x1b[32m")
+			_, _ = buff.WriteString(text)
+			_, _ = buff.WriteString("\x1b[0m")
+		case DiffDelete:
+			_, _ = buff.WriteString("\x1b[31m")
+			_, _ = buff.WriteString(text)
+			_, _ = buff.WriteString("\x1b[0m")
+		case DiffEqual:
+			_, _ = buff.WriteString(text)
+		}
+	}
+
+	return buff.String()
+}
+
+// DiffText1 computes and returns the source text (all equalities and deletions).
+func (dmp *DiffMatchPatch) DiffText1(diffs []Diff) string {
+	var text bytes.Buffer
+
+	for _, aDiff := range diffs {
+		if aDiff.Type != DiffInsert {
+			_, _ = text.WriteString(aDiff.Text)
+		}
+	}
+	return text.String()
+}
+
+// DiffText2 computes and returns the destination text (all equalities and insertions).
+func (dmp *DiffMatchPatch) DiffText2(diffs []Diff) string {
+	var text bytes.Buffer
+
+	for _, aDiff := range diffs {
+		if aDiff.Type != DiffDelete {
+			_, _ = text.WriteString(aDiff.Text)
+		}
+	}
+	return text.String()
+}
+
+// DiffLevenshtein computes the Levenshtein distance that is the number of inserted, deleted or substituted characters.
+func (dmp *DiffMatchPatch) DiffLevenshtein(diffs []Diff) int {
+	levenshtein := 0
+	insertions := 0
+	deletions := 0
+
+	for _, aDiff := range diffs {
+		switch aDiff.Type {
+		case DiffInsert:
+			insertions += len(aDiff.Text)
+		case DiffDelete:
+			deletions += len(aDiff.Text)
+		case DiffEqual:
+			// A deletion and an insertion is one substitution.
+			levenshtein += max(insertions, deletions)
+			insertions = 0
+			deletions = 0
+		}
+	}
+
+	levenshtein += max(insertions, deletions)
+	return levenshtein
+}
+
+// DiffToDelta crushes the diff into an encoded string which describes the operations required to transform text1 into text2.
+// E.g. =3\t-2\t+ing -> Keep 3 chars, delete 2 chars, insert 'ing'. Operations are tab-separated. Inserted text is escaped using %xx notation.
+func (dmp *DiffMatchPatch) DiffToDelta(diffs []Diff) string {
+	var text bytes.Buffer
+	for _, aDiff := range diffs {
+		switch aDiff.Type {
+		case DiffInsert:
+			_, _ = text.WriteString("+")
+			_, _ = text.WriteString(strings.Replace(url.QueryEscape(aDiff.Text), "+", " ", -1))
+			_, _ = text.WriteString("\t")
+		case DiffDelete:
+			_, _ = text.WriteString("-")
+			_, _ = text.WriteString(strconv.Itoa(utf8.RuneCountInString(aDiff.Text)))
+			_, _ = text.WriteString("\t")
+		case DiffEqual:
+			_, _ = text.WriteString("=")
+			_, _ = text.WriteString(strconv.Itoa(utf8.RuneCountInString(aDiff.Text)))
+			_, _ = text.WriteString("\t")
+		}
+	}
+	delta := text.String()
+	if len(delta) != 0 {
+		// Strip off trailing tab character.
+		delta = delta[0 : utf8.RuneCountInString(delta)-1]
+		delta = unescaper.Replace(delta)
+	}
+	return delta
+}
+
+// DiffFromDelta, given the original text1 and an encoded string which describes the operations required to transform text1 into text2, computes the full diff.
+func (dmp *DiffMatchPatch) DiffFromDelta(text1 string, delta string) (diffs []Diff, err error) { + i := 0 + runes := []rune(text1) + + for _, token := range strings.Split(delta, "\t") { + if len(token) == 0 { + // Blank tokens are ok (from a trailing \t). + continue + } + + // Each token begins with a one character parameter which specifies the operation of this token (delete, insert, equality). + param := token[1:] + + switch op := token[0]; op { + case '+': + // Decode would Diff all "+" to " " + param = strings.Replace(param, "+", "%2b", -1) + param, err = url.QueryUnescape(param) + if err != nil { + return nil, err + } + if !utf8.ValidString(param) { + return nil, fmt.Errorf("invalid UTF-8 token: %q", param) + } + + diffs = append(diffs, Diff{DiffInsert, param}) + case '=', '-': + n, err := strconv.ParseInt(param, 10, 0) + if err != nil { + return nil, err + } else if n < 0 { + return nil, errors.New("Negative number in DiffFromDelta: " + param) + } + + i += int(n) + // Break out if we are out of bounds, go1.6 can't handle this very well + if i > len(runes) { + break + } + // Remember that string slicing is by byte - we want by rune here. + text := string(runes[i-int(n) : i]) + + if op == '=' { + diffs = append(diffs, Diff{DiffEqual, text}) + } else { + diffs = append(diffs, Diff{DiffDelete, text}) + } + default: + // Anything else is an error. + return nil, errors.New("Invalid diff operation in DiffFromDelta: " + string(token[0])) + } + } + + if i != len(runes) { + return nil, fmt.Errorf("Delta length (%v) is different from source text length (%v)", i, len(text1)) + } + + return diffs, nil +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go new file mode 100644 index 00000000..d3acc32c --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go @@ -0,0 +1,46 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +// Package diffmatchpatch offers robust algorithms to perform the operations required for synchronizing plain text. +package diffmatchpatch + +import ( + "time" +) + +// DiffMatchPatch holds the configuration for diff-match-patch operations. +type DiffMatchPatch struct { + // Number of seconds to map a diff before giving up (0 for infinity). + DiffTimeout time.Duration + // Cost of an empty edit operation in terms of edit characters. + DiffEditCost int + // How far to search for a match (0 = exact location, 1000+ = broad match). A match this many characters away from the expected location will add 1.0 to the score (0.0 is a perfect match). + MatchDistance int + // When deleting a large block of text (over ~64 characters), how close do the contents have to be to match the expected contents. (0.0 = perfection, 1.0 = very loose). Note that MatchThreshold controls how closely the end points of a delete need to match. + PatchDeleteThreshold float64 + // Chunk size for context length. + PatchMargin int + // The number of bits in an int. + MatchMaxBits int + // At what point is no match declared (0.0 = perfection, 1.0 = very loose). + MatchThreshold float64 +} + +// New creates a new DiffMatchPatch object with default parameters. 
+func New() *DiffMatchPatch { + // Defaults. + return &DiffMatchPatch{ + DiffTimeout: time.Second, + DiffEditCost: 4, + MatchThreshold: 0.5, + MatchDistance: 1000, + PatchDeleteThreshold: 0.5, + PatchMargin: 4, + MatchMaxBits: 32, + } +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go new file mode 100644 index 00000000..17374e10 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go @@ -0,0 +1,160 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "math" +) + +// MatchMain locates the best instance of 'pattern' in 'text' near 'loc'. +// Returns -1 if no match found. +func (dmp *DiffMatchPatch) MatchMain(text, pattern string, loc int) int { + // Check for null inputs not needed since null can't be passed in C#. + + loc = int(math.Max(0, math.Min(float64(loc), float64(len(text))))) + if text == pattern { + // Shortcut (potentially not guaranteed by the algorithm) + return 0 + } else if len(text) == 0 { + // Nothing to match. + return -1 + } else if loc+len(pattern) <= len(text) && text[loc:loc+len(pattern)] == pattern { + // Perfect match at the perfect spot! (Includes case of null pattern) + return loc + } + // Do a fuzzy compare. + return dmp.MatchBitap(text, pattern, loc) +} + +// MatchBitap locates the best instance of 'pattern' in 'text' near 'loc' using the Bitap algorithm. +// Returns -1 if no match was found. +func (dmp *DiffMatchPatch) MatchBitap(text, pattern string, loc int) int { + // Initialise the alphabet. + s := dmp.MatchAlphabet(pattern) + + // Highest score beyond which we give up. + scoreThreshold := dmp.MatchThreshold + // Is there a nearby exact match? (speedup) + bestLoc := indexOf(text, pattern, loc) + if bestLoc != -1 { + scoreThreshold = math.Min(dmp.matchBitapScore(0, bestLoc, loc, + pattern), scoreThreshold) + // What about in the other direction? (speedup) + bestLoc = lastIndexOf(text, pattern, loc+len(pattern)) + if bestLoc != -1 { + scoreThreshold = math.Min(dmp.matchBitapScore(0, bestLoc, loc, + pattern), scoreThreshold) + } + } + + // Initialise the bit arrays. + matchmask := 1 << uint((len(pattern) - 1)) + bestLoc = -1 + + var binMin, binMid int + binMax := len(pattern) + len(text) + lastRd := []int{} + for d := 0; d < len(pattern); d++ { + // Scan for the best match; each iteration allows for one more error. Run a binary search to determine how far from 'loc' we can stray at this error level. + binMin = 0 + binMid = binMax + for binMin < binMid { + if dmp.matchBitapScore(d, loc+binMid, loc, pattern) <= scoreThreshold { + binMin = binMid + } else { + binMax = binMid + } + binMid = (binMax-binMin)/2 + binMin + } + // Use the result from this iteration as the maximum for the next. + binMax = binMid + start := int(math.Max(1, float64(loc-binMid+1))) + finish := int(math.Min(float64(loc+binMid), float64(len(text))) + float64(len(pattern))) + + rd := make([]int, finish+2) + rd[finish+1] = (1 << uint(d)) - 1 + + for j := finish; j >= start; j-- { + var charMatch int + if len(text) <= j-1 { + // Out of range. 
+				charMatch = 0
+			} else if _, ok := s[text[j-1]]; !ok {
+				charMatch = 0
+			} else {
+				charMatch = s[text[j-1]]
+			}
+
+			if d == 0 {
+				// First pass: exact match.
+				rd[j] = ((rd[j+1] << 1) | 1) & charMatch
+			} else {
+				// Subsequent passes: fuzzy match.
+				rd[j] = ((rd[j+1]<<1)|1)&charMatch | (((lastRd[j+1] | lastRd[j]) << 1) | 1) | lastRd[j+1]
+			}
+			if (rd[j] & matchmask) != 0 {
+				score := dmp.matchBitapScore(d, j-1, loc, pattern)
+				// This match will almost certainly be better than any existing match. But check anyway.
+				if score <= scoreThreshold {
+					// Told you so.
+					scoreThreshold = score
+					bestLoc = j - 1
+					if bestLoc > loc {
+						// When passing loc, don't exceed our current distance from loc.
+						start = int(math.Max(1, float64(2*loc-bestLoc)))
+					} else {
+						// Already passed loc, downhill from here on in.
+						break
+					}
+				}
+			}
+		}
+		if dmp.matchBitapScore(d+1, loc, loc, pattern) > scoreThreshold {
+			// No hope for a (better) match at greater error levels.
+			break
+		}
+		lastRd = rd
+	}
+	return bestLoc
+}
+
+// matchBitapScore computes and returns the score for a match with e errors and x location.
+func (dmp *DiffMatchPatch) matchBitapScore(e, x, loc int, pattern string) float64 {
+	accuracy := float64(e) / float64(len(pattern))
+	proximity := math.Abs(float64(loc - x))
+	if dmp.MatchDistance == 0 {
+		// Dodge divide by zero error.
+		if proximity == 0 {
+			return accuracy
+		}
+
+		return 1.0
+	}
+	return accuracy + (proximity / float64(dmp.MatchDistance))
+}
+
+// MatchAlphabet initialises the alphabet for the Bitap algorithm.
+func (dmp *DiffMatchPatch) MatchAlphabet(pattern string) map[byte]int {
+	s := map[byte]int{}
+	charPattern := []byte(pattern)
+	for _, c := range charPattern {
+		_, ok := s[c]
+		if !ok {
+			s[c] = 0
+		}
+	}
+	i := 0
+
+	for _, c := range charPattern {
+		value := s[c] | int(uint(1)<<uint((len(pattern)-i-1)))
+		s[c] = value
+		i++
+	}
+	return s
+}
diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/mathutil.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/mathutil.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/mathutil.go
@@ -0,0 +1,23 @@
+// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
+// https://github.com/sergi/go-diff
+// See the included LICENSE file for license details.
+//
+// go-diff is a Go implementation of Google's Diff, Match, and Patch library
+// Original library is Copyright (c) 2006 Google Inc.
+// http://code.google.com/p/google-diff-match-patch/
+
+package diffmatchpatch
+
+func min(x, y int) int {
+	if x < y {
+		return x
+	}
+	return y
+}
+
+func max(x, y int) int {
+	if x > y {
+		return x
+	}
+	return y
+}
diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go
new file mode 100644
index 00000000..223c43c4
--- /dev/null
+++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go
@@ -0,0 +1,556 @@
+// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
+// https://github.com/sergi/go-diff
+// See the included LICENSE file for license details.
+//
+// go-diff is a Go implementation of Google's Diff, Match, and Patch library
+// Original library is Copyright (c) 2006 Google Inc.
+// http://code.google.com/p/google-diff-match-patch/
+
+package diffmatchpatch
+
+import (
+	"bytes"
+	"errors"
+	"math"
+	"net/url"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// Patch represents one patch operation.
+type Patch struct {
+	diffs   []Diff
+	Start1  int
+	Start2  int
+	Length1 int
+	Length2 int
+}
+
+// String emulates GNU diff's format.
+// Header: @@ -382,8 +481,9 @@
+// Indices are printed as 1-based, not 0-based.
+func (p *Patch) String() string {
+	var coords1, coords2 string
+
+	if p.Length1 == 0 {
+		coords1 = strconv.Itoa(p.Start1) + ",0"
+	} else if p.Length1 == 1 {
+		coords1 = strconv.Itoa(p.Start1 + 1)
+	} else {
+		coords1 = strconv.Itoa(p.Start1+1) + "," + strconv.Itoa(p.Length1)
+	}
+
+	if p.Length2 == 0 {
+		coords2 = strconv.Itoa(p.Start2) + ",0"
+	} else if p.Length2 == 1 {
+		coords2 = strconv.Itoa(p.Start2 + 1)
+	} else {
+		coords2 = strconv.Itoa(p.Start2+1) + "," + strconv.Itoa(p.Length2)
+	}
+
+	var text bytes.Buffer
+	_, _ = text.WriteString("@@ -" + coords1 + " +" + coords2 + " @@\n")
+
+	// Escape the body of the patch with %xx notation.
+ for _, aDiff := range p.diffs { + switch aDiff.Type { + case DiffInsert: + _, _ = text.WriteString("+") + case DiffDelete: + _, _ = text.WriteString("-") + case DiffEqual: + _, _ = text.WriteString(" ") + } + + _, _ = text.WriteString(strings.Replace(url.QueryEscape(aDiff.Text), "+", " ", -1)) + _, _ = text.WriteString("\n") + } + + return unescaper.Replace(text.String()) +} + +// PatchAddContext increases the context until it is unique, but doesn't let the pattern expand beyond MatchMaxBits. +func (dmp *DiffMatchPatch) PatchAddContext(patch Patch, text string) Patch { + if len(text) == 0 { + return patch + } + + pattern := text[patch.Start2 : patch.Start2+patch.Length1] + padding := 0 + + // Look for the first and last matches of pattern in text. If two different matches are found, increase the pattern length. + for strings.Index(text, pattern) != strings.LastIndex(text, pattern) && + len(pattern) < dmp.MatchMaxBits-2*dmp.PatchMargin { + padding += dmp.PatchMargin + maxStart := max(0, patch.Start2-padding) + minEnd := min(len(text), patch.Start2+patch.Length1+padding) + pattern = text[maxStart:minEnd] + } + // Add one chunk for good luck. + padding += dmp.PatchMargin + + // Add the prefix. + prefix := text[max(0, patch.Start2-padding):patch.Start2] + if len(prefix) != 0 { + patch.diffs = append([]Diff{Diff{DiffEqual, prefix}}, patch.diffs...) + } + // Add the suffix. + suffix := text[patch.Start2+patch.Length1 : min(len(text), patch.Start2+patch.Length1+padding)] + if len(suffix) != 0 { + patch.diffs = append(patch.diffs, Diff{DiffEqual, suffix}) + } + + // Roll back the start points. + patch.Start1 -= len(prefix) + patch.Start2 -= len(prefix) + // Extend the lengths. + patch.Length1 += len(prefix) + len(suffix) + patch.Length2 += len(prefix) + len(suffix) + + return patch +} + +// PatchMake computes a list of patches. +func (dmp *DiffMatchPatch) PatchMake(opt ...interface{}) []Patch { + if len(opt) == 1 { + diffs, _ := opt[0].([]Diff) + text1 := dmp.DiffText1(diffs) + return dmp.PatchMake(text1, diffs) + } else if len(opt) == 2 { + text1 := opt[0].(string) + switch t := opt[1].(type) { + case string: + diffs := dmp.DiffMain(text1, t, true) + if len(diffs) > 2 { + diffs = dmp.DiffCleanupSemantic(diffs) + diffs = dmp.DiffCleanupEfficiency(diffs) + } + return dmp.PatchMake(text1, diffs) + case []Diff: + return dmp.patchMake2(text1, t) + } + } else if len(opt) == 3 { + return dmp.PatchMake(opt[0], opt[2]) + } + return []Patch{} +} + +// patchMake2 computes a list of patches to turn text1 into text2. +// text2 is not provided, diffs are the delta between text1 and text2. +func (dmp *DiffMatchPatch) patchMake2(text1 string, diffs []Diff) []Patch { + // Check for null inputs not needed since null can't be passed in C#. + patches := []Patch{} + if len(diffs) == 0 { + return patches // Get rid of the null case. + } + + patch := Patch{} + charCount1 := 0 // Number of characters into the text1 string. + charCount2 := 0 // Number of characters into the text2 string. + // Start with text1 (prepatchText) and apply the diffs until we arrive at text2 (postpatchText). We recreate the patches one by one to determine context info. + prepatchText := text1 + postpatchText := text1 + + for i, aDiff := range diffs { + if len(patch.diffs) == 0 && aDiff.Type != DiffEqual { + // A new patch starts here. 
+ patch.Start1 = charCount1 + patch.Start2 = charCount2 + } + + switch aDiff.Type { + case DiffInsert: + patch.diffs = append(patch.diffs, aDiff) + patch.Length2 += len(aDiff.Text) + postpatchText = postpatchText[:charCount2] + + aDiff.Text + postpatchText[charCount2:] + case DiffDelete: + patch.Length1 += len(aDiff.Text) + patch.diffs = append(patch.diffs, aDiff) + postpatchText = postpatchText[:charCount2] + postpatchText[charCount2+len(aDiff.Text):] + case DiffEqual: + if len(aDiff.Text) <= 2*dmp.PatchMargin && + len(patch.diffs) != 0 && i != len(diffs)-1 { + // Small equality inside a patch. + patch.diffs = append(patch.diffs, aDiff) + patch.Length1 += len(aDiff.Text) + patch.Length2 += len(aDiff.Text) + } + if len(aDiff.Text) >= 2*dmp.PatchMargin { + // Time for a new patch. + if len(patch.diffs) != 0 { + patch = dmp.PatchAddContext(patch, prepatchText) + patches = append(patches, patch) + patch = Patch{} + // Unlike Unidiff, our patch lists have a rolling context. http://code.google.com/p/google-diff-match-patch/wiki/Unidiff Update prepatch text & pos to reflect the application of the just completed patch. + prepatchText = postpatchText + charCount1 = charCount2 + } + } + } + + // Update the current character count. + if aDiff.Type != DiffInsert { + charCount1 += len(aDiff.Text) + } + if aDiff.Type != DiffDelete { + charCount2 += len(aDiff.Text) + } + } + + // Pick up the leftover patch if not empty. + if len(patch.diffs) != 0 { + patch = dmp.PatchAddContext(patch, prepatchText) + patches = append(patches, patch) + } + + return patches +} + +// PatchDeepCopy returns an array that is identical to a given an array of patches. +func (dmp *DiffMatchPatch) PatchDeepCopy(patches []Patch) []Patch { + patchesCopy := []Patch{} + for _, aPatch := range patches { + patchCopy := Patch{} + for _, aDiff := range aPatch.diffs { + patchCopy.diffs = append(patchCopy.diffs, Diff{ + aDiff.Type, + aDiff.Text, + }) + } + patchCopy.Start1 = aPatch.Start1 + patchCopy.Start2 = aPatch.Start2 + patchCopy.Length1 = aPatch.Length1 + patchCopy.Length2 = aPatch.Length2 + patchesCopy = append(patchesCopy, patchCopy) + } + return patchesCopy +} + +// PatchApply merges a set of patches onto the text. Returns a patched text, as well as an array of true/false values indicating which patches were applied. +func (dmp *DiffMatchPatch) PatchApply(patches []Patch, text string) (string, []bool) { + if len(patches) == 0 { + return text, []bool{} + } + + // Deep copy the patches so that no changes are made to originals. + patches = dmp.PatchDeepCopy(patches) + + nullPadding := dmp.PatchAddPadding(patches) + text = nullPadding + text + nullPadding + patches = dmp.PatchSplitMax(patches) + + x := 0 + // delta keeps track of the offset between the expected and actual location of the previous patch. If there are patches expected at positions 10 and 20, but the first patch was found at 12, delta is 2 and the second patch has an effective expected position of 22. + delta := 0 + results := make([]bool, len(patches)) + for _, aPatch := range patches { + expectedLoc := aPatch.Start2 + delta + text1 := dmp.DiffText1(aPatch.diffs) + var startLoc int + endLoc := -1 + if len(text1) > dmp.MatchMaxBits { + // PatchSplitMax will only provide an oversized pattern in the case of a monster delete. 
+ startLoc = dmp.MatchMain(text, text1[:dmp.MatchMaxBits], expectedLoc) + if startLoc != -1 { + endLoc = dmp.MatchMain(text, + text1[len(text1)-dmp.MatchMaxBits:], expectedLoc+len(text1)-dmp.MatchMaxBits) + if endLoc == -1 || startLoc >= endLoc { + // Can't find valid trailing context. Drop this patch. + startLoc = -1 + } + } + } else { + startLoc = dmp.MatchMain(text, text1, expectedLoc) + } + if startLoc == -1 { + // No match found. :( + results[x] = false + // Subtract the delta for this failed patch from subsequent patches. + delta -= aPatch.Length2 - aPatch.Length1 + } else { + // Found a match. :) + results[x] = true + delta = startLoc - expectedLoc + var text2 string + if endLoc == -1 { + text2 = text[startLoc:int(math.Min(float64(startLoc+len(text1)), float64(len(text))))] + } else { + text2 = text[startLoc:int(math.Min(float64(endLoc+dmp.MatchMaxBits), float64(len(text))))] + } + if text1 == text2 { + // Perfect match, just shove the Replacement text in. + text = text[:startLoc] + dmp.DiffText2(aPatch.diffs) + text[startLoc+len(text1):] + } else { + // Imperfect match. Run a diff to get a framework of equivalent indices. + diffs := dmp.DiffMain(text1, text2, false) + if len(text1) > dmp.MatchMaxBits && float64(dmp.DiffLevenshtein(diffs))/float64(len(text1)) > dmp.PatchDeleteThreshold { + // The end points match, but the content is unacceptably bad. + results[x] = false + } else { + diffs = dmp.DiffCleanupSemanticLossless(diffs) + index1 := 0 + for _, aDiff := range aPatch.diffs { + if aDiff.Type != DiffEqual { + index2 := dmp.DiffXIndex(diffs, index1) + if aDiff.Type == DiffInsert { + // Insertion + text = text[:startLoc+index2] + aDiff.Text + text[startLoc+index2:] + } else if aDiff.Type == DiffDelete { + // Deletion + startIndex := startLoc + index2 + text = text[:startIndex] + + text[startIndex+dmp.DiffXIndex(diffs, index1+len(aDiff.Text))-index2:] + } + } + if aDiff.Type != DiffDelete { + index1 += len(aDiff.Text) + } + } + } + } + } + x++ + } + // Strip the padding off. + text = text[len(nullPadding) : len(nullPadding)+(len(text)-2*len(nullPadding))] + return text, results +} + +// PatchAddPadding adds some padding on text start and end so that edges can match something. +// Intended to be called only from within patchApply. +func (dmp *DiffMatchPatch) PatchAddPadding(patches []Patch) string { + paddingLength := dmp.PatchMargin + nullPadding := "" + for x := 1; x <= paddingLength; x++ { + nullPadding += string(x) + } + + // Bump all the patches forward. + for i := range patches { + patches[i].Start1 += paddingLength + patches[i].Start2 += paddingLength + } + + // Add some padding on start of first diff. + if len(patches[0].diffs) == 0 || patches[0].diffs[0].Type != DiffEqual { + // Add nullPadding equality. + patches[0].diffs = append([]Diff{Diff{DiffEqual, nullPadding}}, patches[0].diffs...) + patches[0].Start1 -= paddingLength // Should be 0. + patches[0].Start2 -= paddingLength // Should be 0. + patches[0].Length1 += paddingLength + patches[0].Length2 += paddingLength + } else if paddingLength > len(patches[0].diffs[0].Text) { + // Grow first equality. + extraLength := paddingLength - len(patches[0].diffs[0].Text) + patches[0].diffs[0].Text = nullPadding[len(patches[0].diffs[0].Text):] + patches[0].diffs[0].Text + patches[0].Start1 -= extraLength + patches[0].Start2 -= extraLength + patches[0].Length1 += extraLength + patches[0].Length2 += extraLength + } + + // Add some padding on end of last diff. 
+ last := len(patches) - 1 + if len(patches[last].diffs) == 0 || patches[last].diffs[len(patches[last].diffs)-1].Type != DiffEqual { + // Add nullPadding equality. + patches[last].diffs = append(patches[last].diffs, Diff{DiffEqual, nullPadding}) + patches[last].Length1 += paddingLength + patches[last].Length2 += paddingLength + } else if paddingLength > len(patches[last].diffs[len(patches[last].diffs)-1].Text) { + // Grow last equality. + lastDiff := patches[last].diffs[len(patches[last].diffs)-1] + extraLength := paddingLength - len(lastDiff.Text) + patches[last].diffs[len(patches[last].diffs)-1].Text += nullPadding[:extraLength] + patches[last].Length1 += extraLength + patches[last].Length2 += extraLength + } + + return nullPadding +} + +// PatchSplitMax looks through the patches and breaks up any which are longer than the maximum limit of the match algorithm. +// Intended to be called only from within patchApply. +func (dmp *DiffMatchPatch) PatchSplitMax(patches []Patch) []Patch { + patchSize := dmp.MatchMaxBits + for x := 0; x < len(patches); x++ { + if patches[x].Length1 <= patchSize { + continue + } + bigpatch := patches[x] + // Remove the big old patch. + patches = append(patches[:x], patches[x+1:]...) + x-- + + Start1 := bigpatch.Start1 + Start2 := bigpatch.Start2 + precontext := "" + for len(bigpatch.diffs) != 0 { + // Create one of several smaller patches. + patch := Patch{} + empty := true + patch.Start1 = Start1 - len(precontext) + patch.Start2 = Start2 - len(precontext) + if len(precontext) != 0 { + patch.Length1 = len(precontext) + patch.Length2 = len(precontext) + patch.diffs = append(patch.diffs, Diff{DiffEqual, precontext}) + } + for len(bigpatch.diffs) != 0 && patch.Length1 < patchSize-dmp.PatchMargin { + diffType := bigpatch.diffs[0].Type + diffText := bigpatch.diffs[0].Text + if diffType == DiffInsert { + // Insertions are harmless. + patch.Length2 += len(diffText) + Start2 += len(diffText) + patch.diffs = append(patch.diffs, bigpatch.diffs[0]) + bigpatch.diffs = bigpatch.diffs[1:] + empty = false + } else if diffType == DiffDelete && len(patch.diffs) == 1 && patch.diffs[0].Type == DiffEqual && len(diffText) > 2*patchSize { + // This is a large deletion. Let it pass in one chunk. + patch.Length1 += len(diffText) + Start1 += len(diffText) + empty = false + patch.diffs = append(patch.diffs, Diff{diffType, diffText}) + bigpatch.diffs = bigpatch.diffs[1:] + } else { + // Deletion or equality. Only take as much as we can stomach. + diffText = diffText[:min(len(diffText), patchSize-patch.Length1-dmp.PatchMargin)] + + patch.Length1 += len(diffText) + Start1 += len(diffText) + if diffType == DiffEqual { + patch.Length2 += len(diffText) + Start2 += len(diffText) + } else { + empty = false + } + patch.diffs = append(patch.diffs, Diff{diffType, diffText}) + if diffText == bigpatch.diffs[0].Text { + bigpatch.diffs = bigpatch.diffs[1:] + } else { + bigpatch.diffs[0].Text = + bigpatch.diffs[0].Text[len(diffText):] + } + } + } + // Compute the head context for the next patch. + precontext = dmp.DiffText2(patch.diffs) + precontext = precontext[max(0, len(precontext)-dmp.PatchMargin):] + + postcontext := "" + // Append the end context for this patch. 
+ if len(dmp.DiffText1(bigpatch.diffs)) > dmp.PatchMargin { + postcontext = dmp.DiffText1(bigpatch.diffs)[:dmp.PatchMargin] + } else { + postcontext = dmp.DiffText1(bigpatch.diffs) + } + + if len(postcontext) != 0 { + patch.Length1 += len(postcontext) + patch.Length2 += len(postcontext) + if len(patch.diffs) != 0 && patch.diffs[len(patch.diffs)-1].Type == DiffEqual { + patch.diffs[len(patch.diffs)-1].Text += postcontext + } else { + patch.diffs = append(patch.diffs, Diff{DiffEqual, postcontext}) + } + } + if !empty { + x++ + patches = append(patches[:x], append([]Patch{patch}, patches[x:]...)...) + } + } + } + return patches +} + +// PatchToText takes a list of patches and returns a textual representation. +func (dmp *DiffMatchPatch) PatchToText(patches []Patch) string { + var text bytes.Buffer + for _, aPatch := range patches { + _, _ = text.WriteString(aPatch.String()) + } + return text.String() +} + +// PatchFromText parses a textual representation of patches and returns a List of Patch objects. +func (dmp *DiffMatchPatch) PatchFromText(textline string) ([]Patch, error) { + patches := []Patch{} + if len(textline) == 0 { + return patches, nil + } + text := strings.Split(textline, "\n") + textPointer := 0 + patchHeader := regexp.MustCompile("^@@ -(\\d+),?(\\d*) \\+(\\d+),?(\\d*) @@$") + + var patch Patch + var sign uint8 + var line string + for textPointer < len(text) { + + if !patchHeader.MatchString(text[textPointer]) { + return patches, errors.New("Invalid patch string: " + text[textPointer]) + } + + patch = Patch{} + m := patchHeader.FindStringSubmatch(text[textPointer]) + + patch.Start1, _ = strconv.Atoi(m[1]) + if len(m[2]) == 0 { + patch.Start1-- + patch.Length1 = 1 + } else if m[2] == "0" { + patch.Length1 = 0 + } else { + patch.Start1-- + patch.Length1, _ = strconv.Atoi(m[2]) + } + + patch.Start2, _ = strconv.Atoi(m[3]) + + if len(m[4]) == 0 { + patch.Start2-- + patch.Length2 = 1 + } else if m[4] == "0" { + patch.Length2 = 0 + } else { + patch.Start2-- + patch.Length2, _ = strconv.Atoi(m[4]) + } + textPointer++ + + for textPointer < len(text) { + if len(text[textPointer]) > 0 { + sign = text[textPointer][0] + } else { + textPointer++ + continue + } + + line = text[textPointer][1:] + line = strings.Replace(line, "+", "%2b", -1) + line, _ = url.QueryUnescape(line) + if sign == '-' { + // Deletion. + patch.diffs = append(patch.diffs, Diff{DiffDelete, line}) + } else if sign == '+' { + // Insertion. + patch.diffs = append(patch.diffs, Diff{DiffInsert, line}) + } else if sign == ' ' { + // Minor equality. + patch.diffs = append(patch.diffs, Diff{DiffEqual, line}) + } else if sign == '@' { + // Start of next patch. + break + } else { + // WTF? + return patches, errors.New("Invalid patch mode '" + string(sign) + "' in: " + string(line)) + } + textPointer++ + } + + patches = append(patches, patch) + } + return patches, nil +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go new file mode 100644 index 00000000..265f29cc --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go @@ -0,0 +1,88 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. 
+// http://code.google.com/p/google-diff-match-patch/
+
+package diffmatchpatch
+
+import (
+	"strings"
+	"unicode/utf8"
+)
+
+// unescaper unescapes selected chars for compatibility with JavaScript's encodeURI.
+// In speed critical applications this could be dropped since the receiving
+// application will certainly decode these fine. Note that this function is
+// case-sensitive. Thus "%3F" would not be unescaped. But this is ok because it
+// is only called with the output of HttpUtility.UrlEncode which returns
+// lowercase hex. Example: "%3f" -> "?", "%24" -> "$", etc.
+var unescaper = strings.NewReplacer(
+	"%21", "!", "%7E", "~", "%27", "'",
+	"%28", "(", "%29", ")", "%3B", ";",
+	"%2F", "/", "%3F", "?", "%3A", ":",
+	"%40", "@", "%26", "&", "%3D", "=",
+	"%2B", "+", "%24", "$", "%2C", ",", "%23", "#", "%2A", "*")
+
+// indexOf returns the first index of pattern in str, starting at str[i].
+func indexOf(str string, pattern string, i int) int {
+	if i > len(str)-1 {
+		return -1
+	}
+	if i <= 0 {
+		return strings.Index(str, pattern)
+	}
+	ind := strings.Index(str[i:], pattern)
+	if ind == -1 {
+		return -1
+	}
+	return ind + i
+}
+
+// lastIndexOf returns the last index of pattern in str, starting at str[i].
+func lastIndexOf(str string, pattern string, i int) int {
+	if i < 0 {
+		return -1
+	}
+	if i >= len(str) {
+		return strings.LastIndex(str, pattern)
+	}
+	_, size := utf8.DecodeRuneInString(str[i:])
+	return strings.LastIndex(str[:i+size], pattern)
+}
+
+// runesIndexOf returns the index of pattern in target, starting at target[i].
+func runesIndexOf(target, pattern []rune, i int) int {
+	if i > len(target)-1 {
+		return -1
+	}
+	if i <= 0 {
+		return runesIndex(target, pattern)
+	}
+	ind := runesIndex(target[i:], pattern)
+	if ind == -1 {
+		return -1
+	}
+	return ind + i
+}
+
+func runesEqual(r1, r2 []rune) bool {
+	if len(r1) != len(r2) {
+		return false
+	}
+	for i, c := range r1 {
+		if c != r2[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// runesIndex is the equivalent of strings.Index for rune slices.
+func runesIndex(r1, r2 []rune) int {
+	last := len(r1) - len(r2)
+	for i := 0; i <= last; i++ {
+		if runesEqual(r1[i:i+len(r2)], r2) {
+			return i
+		}
+	}
+	return -1
+}
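The unescaper above is the counterpart of the %xx escaping used by DiffToDelta and Patch.String. A short end-to-end sketch of the vendored diff/match/patch API (illustrative, not part of the diff):

package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()
	src := "The quick brown fox jumps over the lazy dog."
	dst := "That quick red fox jumped over a lazy dog."

	// Diff, then encode as a compact delta and decode it again.
	diffs := dmp.DiffMain(src, dst, false)
	delta := dmp.DiffToDelta(diffs)
	restored, err := dmp.DiffFromDelta(src, delta)
	if err != nil {
		panic(err)
	}
	fmt.Println(dmp.DiffText2(restored) == dst) // true

	// Build patches, serialize, parse back, and apply.
	patches := dmp.PatchMake(src, dst)
	parsed, err := dmp.PatchFromText(dmp.PatchToText(patches))
	if err != nil {
		panic(err)
	}
	result, applied := dmp.PatchApply(parsed, src)
	fmt.Println(result == dst, applied) // true, all true

	// Fuzzy-locate a pattern near an expected offset.
	fmt.Println(dmp.MatchMain(src, "lazy", 35)) // 35
}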
diff --git a/vendor/github.com/sirupsen/logrus/LICENSE b/vendor/github.com/sirupsen/logrus/LICENSE
new file mode 100644
index 00000000..f090cb42
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Simon Eskildsen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/sirupsen/logrus/alt_exit.go b/vendor/github.com/sirupsen/logrus/alt_exit.go
new file mode 100644
index 00000000..8af90637
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/alt_exit.go
@@ -0,0 +1,64 @@
+package logrus
+
+// The following code was sourced and modified from the
+// https://github.com/tebeka/atexit package governed by the following license:
+//
+// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+// the Software, and to permit persons to whom the Software is furnished to do so,
+// subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import (
+	"fmt"
+	"os"
+)
+
+var handlers = []func(){}
+
+func runHandler(handler func()) {
+	defer func() {
+		if err := recover(); err != nil {
+			fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
+		}
+	}()
+
+	handler()
+}
+
+func runHandlers() {
+	for _, handler := range handlers {
+		runHandler(handler)
+	}
+}
+
+// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
+func Exit(code int) {
+	runHandlers()
+	os.Exit(code)
+}
+
+// RegisterExitHandler adds a Logrus Exit handler; call logrus.Exit to invoke
+// all handlers. The handlers will also be invoked when any Fatal log entry is
+// made.
+//
+// This method is useful when a caller wishes to use logrus to log a fatal
+// message but also needs to gracefully shut down. An example use case could be
+// closing database connections, or sending an alert that the application is
+// closing.
+func RegisterExitHandler(handler func()) {
+	handlers = append(handlers, handler)
+}
diff --git a/vendor/github.com/sirupsen/logrus/doc.go b/vendor/github.com/sirupsen/logrus/doc.go
new file mode 100644
index 00000000..da67aba0
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/doc.go
@@ -0,0 +1,26 @@
+/*
+Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
+ + +The simplest way to use Logrus is simply the package-level exported logger: + + package main + + import ( + log "github.com/sirupsen/logrus" + ) + + func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "number": 1, + "size": 10, + }).Info("A walrus appears") + } + +Output: + time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 + +For a full guide visit https://github.com/sirupsen/logrus +*/ +package logrus diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go new file mode 100644 index 00000000..cc85d3aa --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/entry.go @@ -0,0 +1,408 @@ +package logrus + +import ( + "bytes" + "fmt" + "os" + "reflect" + "runtime" + "strings" + "sync" + "time" +) + +var ( + bufferPool *sync.Pool + + // qualified package name, cached at first use + logrusPackage string + + // Positions in the call stack when tracing to report the calling method + minimumCallerDepth int + + // Used for caller information initialisation + callerInitOnce sync.Once +) + +const ( + maximumCallerDepth int = 25 + knownLogrusFrames int = 4 +) + +func init() { + bufferPool = &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + } + + // start at the bottom of the stack before the package-name cache is primed + minimumCallerDepth = 1 +} + +// Defines the key when adding errors using WithError. +var ErrorKey = "error" + +// An entry is the final or intermediate Logrus logging entry. It contains all +// the fields passed with WithField{,s}. It's finally logged when Trace, Debug, +// Info, Warn, Error, Fatal or Panic is called on it. These objects can be +// reused and passed around as much as you wish to avoid field duplication. +type Entry struct { + Logger *Logger + + // Contains all the fields set by the user. + Data Fields + + // Time at which the log entry was created + Time time.Time + + // Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic + // This field will be set on entry firing and the value will be equal to the one in Logger struct field. + Level Level + + // Calling method, with package name + Caller *runtime.Frame + + // Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic + Message string + + // When formatter is called in entry.log(), a Buffer may be set to entry + Buffer *bytes.Buffer + + // err may contain a field formatting error + err string +} + +func NewEntry(logger *Logger) *Entry { + return &Entry{ + Logger: logger, + // Default is three fields, plus one optional. Give a little extra room. + Data: make(Fields, 6), + } +} + +// Returns the string representation from the reader and ultimately the +// formatter. +func (entry *Entry) String() (string, error) { + serialized, err := entry.Logger.Formatter.Format(entry) + if err != nil { + return "", err + } + str := string(serialized) + return str, nil +} + +// Add an error as single field (using the key defined in ErrorKey) to the Entry. +func (entry *Entry) WithError(err error) *Entry { + return entry.WithField(ErrorKey, err) +} + +// Add a single field to the Entry. +func (entry *Entry) WithField(key string, value interface{}) *Entry { + return entry.WithFields(Fields{key: value}) +} + +// Add a map of fields to the Entry. 
+func (entry *Entry) WithFields(fields Fields) *Entry { + data := make(Fields, len(entry.Data)+len(fields)) + for k, v := range entry.Data { + data[k] = v + } + var field_err string + for k, v := range fields { + if t := reflect.TypeOf(v); t != nil && t.Kind() == reflect.Func { + field_err = fmt.Sprintf("can not add field %q", k) + if entry.err != "" { + field_err = entry.err + ", " + field_err + } + } else { + data[k] = v + } + } + return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: field_err} +} + +// Overrides the time of the Entry. +func (entry *Entry) WithTime(t time.Time) *Entry { + return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t} +} + +// getPackageName reduces a fully qualified function name to the package name +// There really ought to be to be a better way... +func getPackageName(f string) string { + for { + lastPeriod := strings.LastIndex(f, ".") + lastSlash := strings.LastIndex(f, "/") + if lastPeriod > lastSlash { + f = f[:lastPeriod] + } else { + break + } + } + + return f +} + +// getCaller retrieves the name of the first non-logrus calling function +func getCaller() *runtime.Frame { + // Restrict the lookback frames to avoid runaway lookups + pcs := make([]uintptr, maximumCallerDepth) + depth := runtime.Callers(minimumCallerDepth, pcs) + frames := runtime.CallersFrames(pcs[:depth]) + + // cache this package's fully-qualified name + callerInitOnce.Do(func() { + logrusPackage = getPackageName(runtime.FuncForPC(pcs[0]).Name()) + + // now that we have the cache, we can skip a minimum count of known-logrus functions + // XXX this is dubious, the number of frames may vary store an entry in a logger interface + minimumCallerDepth = knownLogrusFrames + }) + + for f, again := frames.Next(); again; f, again = frames.Next() { + pkg := getPackageName(f.Function) + + // If the caller isn't part of this package, we're done + if pkg != logrusPackage { + return &f + } + } + + // if we got here, we failed to find the caller's context + return nil +} + +func (entry Entry) HasCaller() (has bool) { + return entry.Logger != nil && + entry.Logger.ReportCaller && + entry.Caller != nil +} + +// This function is not declared with a pointer value because otherwise +// race conditions will occur when using multiple goroutines +func (entry Entry) log(level Level, msg string) { + var buffer *bytes.Buffer + + // Default to now, but allow users to override if they want. + // + // We don't have to worry about polluting future calls to Entry#log() + // with this assignment because this function is declared with a + // non-pointer receiver. + if entry.Time.IsZero() { + entry.Time = time.Now() + } + + entry.Level = level + entry.Message = msg + if entry.Logger.ReportCaller { + entry.Caller = getCaller() + } + + entry.fireHooks() + + buffer = bufferPool.Get().(*bytes.Buffer) + buffer.Reset() + defer bufferPool.Put(buffer) + entry.Buffer = buffer + + entry.write() + + entry.Buffer = nil + + // To avoid Entry#log() returning a value that only would make sense for + // panic() to use in Entry#Panic(), we avoid the allocation by checking + // directly here. 
+ if level <= PanicLevel { + panic(&entry) + } +} + +func (entry *Entry) fireHooks() { + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + err := entry.Logger.Hooks.Fire(entry.Level, entry) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) + } +} + +func (entry *Entry) write() { + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + serialized, err := entry.Logger.Formatter.Format(entry) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) + } else { + _, err = entry.Logger.Out.Write(serialized) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) + } + } +} + +func (entry *Entry) Trace(args ...interface{}) { + if entry.Logger.IsLevelEnabled(TraceLevel) { + entry.log(TraceLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Debug(args ...interface{}) { + if entry.Logger.IsLevelEnabled(DebugLevel) { + entry.log(DebugLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Print(args ...interface{}) { + entry.Info(args...) +} + +func (entry *Entry) Info(args ...interface{}) { + if entry.Logger.IsLevelEnabled(InfoLevel) { + entry.log(InfoLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Warn(args ...interface{}) { + if entry.Logger.IsLevelEnabled(WarnLevel) { + entry.log(WarnLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Warning(args ...interface{}) { + entry.Warn(args...) +} + +func (entry *Entry) Error(args ...interface{}) { + if entry.Logger.IsLevelEnabled(ErrorLevel) { + entry.log(ErrorLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Fatal(args ...interface{}) { + if entry.Logger.IsLevelEnabled(FatalLevel) { + entry.log(FatalLevel, fmt.Sprint(args...)) + } + entry.Logger.Exit(1) +} + +func (entry *Entry) Panic(args ...interface{}) { + if entry.Logger.IsLevelEnabled(PanicLevel) { + entry.log(PanicLevel, fmt.Sprint(args...)) + } + panic(fmt.Sprint(args...)) +} + +// Entry Printf family functions + +func (entry *Entry) Tracef(format string, args ...interface{}) { + if entry.Logger.IsLevelEnabled(TraceLevel) { + entry.Trace(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Debugf(format string, args ...interface{}) { + if entry.Logger.IsLevelEnabled(DebugLevel) { + entry.Debug(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Infof(format string, args ...interface{}) { + if entry.Logger.IsLevelEnabled(InfoLevel) { + entry.Info(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Printf(format string, args ...interface{}) { + entry.Infof(format, args...) +} + +func (entry *Entry) Warnf(format string, args ...interface{}) { + if entry.Logger.IsLevelEnabled(WarnLevel) { + entry.Warn(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Warningf(format string, args ...interface{}) { + entry.Warnf(format, args...) 
+} + +func (entry *Entry) Errorf(format string, args ...interface{}) { + if entry.Logger.IsLevelEnabled(ErrorLevel) { + entry.Error(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Fatalf(format string, args ...interface{}) { + if entry.Logger.IsLevelEnabled(FatalLevel) { + entry.Fatal(fmt.Sprintf(format, args...)) + } + entry.Logger.Exit(1) +} + +func (entry *Entry) Panicf(format string, args ...interface{}) { + if entry.Logger.IsLevelEnabled(PanicLevel) { + entry.Panic(fmt.Sprintf(format, args...)) + } +} + +// Entry Println family functions + +func (entry *Entry) Traceln(args ...interface{}) { + if entry.Logger.IsLevelEnabled(TraceLevel) { + entry.Trace(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Debugln(args ...interface{}) { + if entry.Logger.IsLevelEnabled(DebugLevel) { + entry.Debug(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Infoln(args ...interface{}) { + if entry.Logger.IsLevelEnabled(InfoLevel) { + entry.Info(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Println(args ...interface{}) { + entry.Infoln(args...) +} + +func (entry *Entry) Warnln(args ...interface{}) { + if entry.Logger.IsLevelEnabled(WarnLevel) { + entry.Warn(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Warningln(args ...interface{}) { + entry.Warnln(args...) +} + +func (entry *Entry) Errorln(args ...interface{}) { + if entry.Logger.IsLevelEnabled(ErrorLevel) { + entry.Error(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Fatalln(args ...interface{}) { + if entry.Logger.IsLevelEnabled(FatalLevel) { + entry.Fatal(entry.sprintlnn(args...)) + } + entry.Logger.Exit(1) +} + +func (entry *Entry) Panicln(args ...interface{}) { + if entry.Logger.IsLevelEnabled(PanicLevel) { + entry.Panic(entry.sprintlnn(args...)) + } +} + +// Sprintlnn => Sprint no newline. This is to get the behavior of +// fmt.Sprintln, where spaces are always added between operands regardless of +// their type. Instead of vendoring the Sprintln implementation to spare a +// string allocation, we do the simplest thing. +func (entry *Entry) sprintlnn(args ...interface{}) string { + msg := fmt.Sprintln(args...) + return msg[:len(msg)-1] +} diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go new file mode 100644 index 00000000..7342613c --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/exported.go @@ -0,0 +1,219 @@ +package logrus + +import ( + "io" + "time" +) + +var ( + // std is the name of the standard logger in stdlib `log` + std = New() +) + +func StandardLogger() *Logger { + return std +} + +// SetOutput sets the standard logger output. +func SetOutput(out io.Writer) { + std.SetOutput(out) +} + +// SetFormatter sets the standard logger formatter. +func SetFormatter(formatter Formatter) { + std.SetFormatter(formatter) +} + +// SetReportCaller sets whether the standard logger will include the calling +// method as a field. +func SetReportCaller(include bool) { + std.SetReportCaller(include) +} + +// SetLevel sets the standard logger level. +func SetLevel(level Level) { + std.SetLevel(level) +} + +// GetLevel returns the standard logger level. +func GetLevel() Level { + return std.GetLevel() +} + +// IsLevelEnabled checks if the log level of the standard logger is greater than the level param +func IsLevelEnabled(level Level) bool { + return std.IsLevelEnabled(level) +} + +// AddHook adds a hook to the standard logger hooks.
+func AddHook(hook Hook) { + std.AddHook(hook) +} + +// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. +func WithError(err error) *Entry { + return std.WithField(ErrorKey, err) +} + +// WithField creates an entry from the standard logger and adds a field to +// it. If you want multiple fields, use `WithFields`. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithField(key string, value interface{}) *Entry { + return std.WithField(key, value) +} + +// WithFields creates an entry from the standard logger and adds multiple +// fields to it. This is simply a helper for `WithField`, invoking it +// once for each field. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithFields(fields Fields) *Entry { + return std.WithFields(fields) +} + +// WithTime creates an entry from the standard logger and overrides the time of +// logs generated with it. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithTime(t time.Time) *Entry { + return std.WithTime(t) +} + +// Trace logs a message at level Trace on the standard logger. +func Trace(args ...interface{}) { + std.Trace(args...) +} + +// Debug logs a message at level Debug on the standard logger. +func Debug(args ...interface{}) { + std.Debug(args...) +} + +// Print logs a message at level Info on the standard logger. +func Print(args ...interface{}) { + std.Print(args...) +} + +// Info logs a message at level Info on the standard logger. +func Info(args ...interface{}) { + std.Info(args...) +} + +// Warn logs a message at level Warn on the standard logger. +func Warn(args ...interface{}) { + std.Warn(args...) +} + +// Warning logs a message at level Warn on the standard logger. +func Warning(args ...interface{}) { + std.Warning(args...) +} + +// Error logs a message at level Error on the standard logger. +func Error(args ...interface{}) { + std.Error(args...) +} + +// Panic logs a message at level Panic on the standard logger. +func Panic(args ...interface{}) { + std.Panic(args...) +} + +// Fatal logs a message at level Fatal on the standard logger, then the process will exit with status set to 1. +func Fatal(args ...interface{}) { + std.Fatal(args...) +} + +// Tracef logs a message at level Trace on the standard logger. +func Tracef(format string, args ...interface{}) { + std.Tracef(format, args...) +} + +// Debugf logs a message at level Debug on the standard logger. +func Debugf(format string, args ...interface{}) { + std.Debugf(format, args...) +} + +// Printf logs a message at level Info on the standard logger. +func Printf(format string, args ...interface{}) { + std.Printf(format, args...) +} + +// Infof logs a message at level Info on the standard logger. +func Infof(format string, args ...interface{}) { + std.Infof(format, args...) +} + +// Warnf logs a message at level Warn on the standard logger. +func Warnf(format string, args ...interface{}) { + std.Warnf(format, args...) +} + +// Warningf logs a message at level Warn on the standard logger. +func Warningf(format string, args ...interface{}) { + std.Warningf(format, args...) +} + +// Errorf logs a message at level Error on the standard logger. +func Errorf(format string, args ...interface{}) { + std.Errorf(format, args...) +} + +// Panicf logs a message at level Panic on the standard logger.
+func Panicf(format string, args ...interface{}) { + std.Panicf(format, args...) +} + +// Fatalf logs a message at level Fatal on the standard logger, then the process will exit with status set to 1. +func Fatalf(format string, args ...interface{}) { + std.Fatalf(format, args...) +} + +// Traceln logs a message at level Trace on the standard logger. +func Traceln(args ...interface{}) { + std.Traceln(args...) +} + +// Debugln logs a message at level Debug on the standard logger. +func Debugln(args ...interface{}) { + std.Debugln(args...) +} + +// Println logs a message at level Info on the standard logger. +func Println(args ...interface{}) { + std.Println(args...) +} + +// Infoln logs a message at level Info on the standard logger. +func Infoln(args ...interface{}) { + std.Infoln(args...) +} + +// Warnln logs a message at level Warn on the standard logger. +func Warnln(args ...interface{}) { + std.Warnln(args...) +} + +// Warningln logs a message at level Warn on the standard logger. +func Warningln(args ...interface{}) { + std.Warningln(args...) +} + +// Errorln logs a message at level Error on the standard logger. +func Errorln(args ...interface{}) { + std.Errorln(args...) +} + +// Panicln logs a message at level Panic on the standard logger. +func Panicln(args ...interface{}) { + std.Panicln(args...) +} + +// Fatalln logs a message at level Fatal on the standard logger, then the process will exit with status set to 1. +func Fatalln(args ...interface{}) { + std.Fatalln(args...) +} diff --git a/vendor/github.com/sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go new file mode 100644 index 00000000..40888377 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/formatter.go @@ -0,0 +1,78 @@ +package logrus + +import "time" + +// Default key names for the default fields +const ( + defaultTimestampFormat = time.RFC3339 + FieldKeyMsg = "msg" + FieldKeyLevel = "level" + FieldKeyTime = "time" + FieldKeyLogrusError = "logrus_error" + FieldKeyFunc = "func" + FieldKeyFile = "file" +) + +// The Formatter interface is used to implement a custom Formatter. It takes an +// `Entry`. It exposes all the fields, including the default ones: +// +// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. +// * `entry.Data["time"]`. The timestamp. +// * `entry.Data["level"]`. The level the entry was logged at. +// +// Any additional fields added with `WithField` or `WithFields` are also in +// `entry.Data`. Format is expected to return an array of bytes which are then +// logged to `logger.Out`. +type Formatter interface { + Format(*Entry) ([]byte, error) +} + +// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when +// dumping it. If this code wasn't there, doing: +// +// logrus.WithField("level", 1).Info("hello") +// +// would just silently drop the user-provided level. Instead, with this code +// it'll be logged as: +// +// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} +// +// It's not exported because it's still using Data in an opinionated way. It's to +// avoid code duplication between the two default formatters.
+func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) { + timeKey := fieldMap.resolve(FieldKeyTime) + if t, ok := data[timeKey]; ok { + data["fields."+timeKey] = t + delete(data, timeKey) + } + + msgKey := fieldMap.resolve(FieldKeyMsg) + if m, ok := data[msgKey]; ok { + data["fields."+msgKey] = m + delete(data, msgKey) + } + + levelKey := fieldMap.resolve(FieldKeyLevel) + if l, ok := data[levelKey]; ok { + data["fields."+levelKey] = l + delete(data, levelKey) + } + + logrusErrKey := fieldMap.resolve(FieldKeyLogrusError) + if l, ok := data[logrusErrKey]; ok { + data["fields."+logrusErrKey] = l + delete(data, logrusErrKey) + } + + // If reportCaller is not set, 'func' will not conflict. + if reportCaller { + funcKey := fieldMap.resolve(FieldKeyFunc) + if l, ok := data[funcKey]; ok { + data["fields."+funcKey] = l + } + fileKey := fieldMap.resolve(FieldKeyFile) + if l, ok := data[fileKey]; ok { + data["fields."+fileKey] = l + } + } +} diff --git a/vendor/github.com/sirupsen/logrus/hooks.go b/vendor/github.com/sirupsen/logrus/hooks.go new file mode 100644 index 00000000..3f151cdc --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/hooks.go @@ -0,0 +1,34 @@ +package logrus + +// A hook to be fired when logging on the logging levels returned from +// `Levels()` on your implementation of the interface. Note that hooks are not +// fired in a goroutine or via a channel with workers; you should handle such +// functionality yourself if your call is non-blocking and you don't wish for +// the logging calls for levels returned from `Levels()` to block. +type Hook interface { + Levels() []Level + Fire(*Entry) error +} + +// Internal type for storing the hooks on a logger instance. +type LevelHooks map[Level][]Hook + +// Add a hook to an instance of logger. This is called with +// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. +func (hooks LevelHooks) Add(hook Hook) { + for _, level := range hook.Levels() { + hooks[level] = append(hooks[level], hook) + } +} + +// Fire all the hooks for the passed level. Used by `entry.log` to fire +// appropriate hooks for a log entry. +func (hooks LevelHooks) Fire(level Level, entry *Entry) error { + for _, hook := range hooks[level] { + if err := hook.Fire(entry); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go new file mode 100644 index 00000000..26057535 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/json_formatter.go @@ -0,0 +1,105 @@ +package logrus + +import ( + "bytes" + "encoding/json" + "fmt" +) + +type fieldKey string + +// FieldMap allows customization of the key names for default fields. +type FieldMap map[fieldKey]string + +func (f FieldMap) resolve(key fieldKey) string { + if k, ok := f[key]; ok { + return k + } + + return string(key) +} + +// JSONFormatter formats logs into parsable JSON +type JSONFormatter struct { + // TimestampFormat sets the format used for marshaling timestamps. + TimestampFormat string + + // DisableTimestamp allows disabling automatic timestamps in output + DisableTimestamp bool + + // DataKey allows users to put all the log entry parameters into a nested dictionary at a given key. + DataKey string + + // FieldMap allows users to customize the names of keys for default fields.
+ // As an example: + // formatter := &JSONFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyMsg: "@message", + // FieldKeyFunc: "@caller", + // }, + // } + FieldMap FieldMap + + // PrettyPrint will indent all json logs + PrettyPrint bool +} + +// Format renders a single log entry +func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields, len(entry.Data)+4) + for k, v := range entry.Data { + switch v := v.(type) { + case error: + // Otherwise errors are ignored by `encoding/json` + // https://github.com/sirupsen/logrus/issues/137 + data[k] = v.Error() + default: + data[k] = v + } + } + + if f.DataKey != "" { + newData := make(Fields, 4) + newData[f.DataKey] = data + data = newData + } + + prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = defaultTimestampFormat + } + + if entry.err != "" { + data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err + } + if !f.DisableTimestamp { + data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat) + } + data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message + data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() + if entry.HasCaller() { + data[f.FieldMap.resolve(FieldKeyFunc)] = entry.Caller.Function + data[f.FieldMap.resolve(FieldKeyFile)] = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + } + + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + encoder := json.NewEncoder(b) + if f.PrettyPrint { + encoder.SetIndent("", " ") + } + if err := encoder.Encode(data); err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + + return b.Bytes(), nil +} diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go new file mode 100644 index 00000000..5ceca0ea --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/logger.go @@ -0,0 +1,415 @@ +package logrus + +import ( + "io" + "os" + "sync" + "sync/atomic" + "time" +) + +type Logger struct { + // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a + // file, or leave it default which is `os.Stderr`. You can also set this to + // something more adventurous, such as logging to Kafka. + Out io.Writer + // Hooks for the logger instance. These allow firing events based on logging + // levels and log entries. For example, to send errors to an error tracking + // service, log to StatsD or dump the core on fatal errors. + Hooks LevelHooks + // All log entries pass through the formatter before logged to Out. The + // included formatters are `TextFormatter` and `JSONFormatter` for which + // TextFormatter is the default. In development (when a TTY is attached) it + // logs with colors, but to a file it wouldn't. You can easily implement your + // own that implements the `Formatter` interface, see the `README` or included + // formatters for examples. + Formatter Formatter + + // Flag for whether to log caller info (off by default) + ReportCaller bool + + // The logging level the logger should log at. This is typically (and defaults + // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be + // logged. + Level Level + // Used to sync writing to the log. 
Locking is enabled by default + mu MutexWrap + // Reusable empty entry + entryPool sync.Pool + // Function to exit the application, defaults to `os.Exit()` + ExitFunc exitFunc +} + +type exitFunc func(int) + +type MutexWrap struct { + lock sync.Mutex + disabled bool +} + +func (mw *MutexWrap) Lock() { + if !mw.disabled { + mw.lock.Lock() + } +} + +func (mw *MutexWrap) Unlock() { + if !mw.disabled { + mw.lock.Unlock() + } +} + +func (mw *MutexWrap) Disable() { + mw.disabled = true +} + +// Creates a new logger. Configuration should be set by changing `Formatter`, +// `Out` and `Hooks` directly on the default logger instance. You can also just +// instantiate your own: +// +// var log = &Logger{ +// Out: os.Stderr, +// Formatter: new(JSONFormatter), +// Hooks: make(LevelHooks), +// Level: logrus.DebugLevel, +// } +// +// It's recommended to make this a global instance called `log`. +func New() *Logger { + return &Logger{ + Out: os.Stderr, + Formatter: new(TextFormatter), + Hooks: make(LevelHooks), + Level: InfoLevel, + ExitFunc: os.Exit, + ReportCaller: false, + } +} + +func (logger *Logger) newEntry() *Entry { + entry, ok := logger.entryPool.Get().(*Entry) + if ok { + return entry + } + return NewEntry(logger) +} + +func (logger *Logger) releaseEntry(entry *Entry) { + entry.Data = map[string]interface{}{} + logger.entryPool.Put(entry) +} + +// Adds a field to the log entry; note that it doesn't log until you call +// Debug, Print, Info, Warn, Error, Fatal or Panic. It only creates a log entry. +// If you want multiple fields, use `WithFields`. +func (logger *Logger) WithField(key string, value interface{}) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithField(key, value) +} + +// Adds a struct of fields to the log entry. All it does is call `WithField` for +// each `Field`. +func (logger *Logger) WithFields(fields Fields) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithFields(fields) +} + +// Add an error as a single field to the log entry. All it does is call +// `WithError` for the given `error`. +func (logger *Logger) WithError(err error) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithError(err) +} + +// Overrides the time of the log entry. +func (logger *Logger) WithTime(t time.Time) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithTime(t) +} + +func (logger *Logger) Tracef(format string, args ...interface{}) { + if logger.IsLevelEnabled(TraceLevel) { + entry := logger.newEntry() + entry.Tracef(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Debugf(format string, args ...interface{}) { + if logger.IsLevelEnabled(DebugLevel) { + entry := logger.newEntry() + entry.Debugf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Infof(format string, args ...interface{}) { + if logger.IsLevelEnabled(InfoLevel) { + entry := logger.newEntry() + entry.Infof(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Printf(format string, args ...interface{}) { + entry := logger.newEntry() + entry.Printf(format, args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warnf(format string, args ...interface{}) { + if logger.IsLevelEnabled(WarnLevel) { + entry := logger.newEntry() + entry.Warnf(format, args...)
+ logger.releaseEntry(entry) + } +} + +func (logger *Logger) Warningf(format string, args ...interface{}) { + if logger.IsLevelEnabled(WarnLevel) { + entry := logger.newEntry() + entry.Warnf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Errorf(format string, args ...interface{}) { + if logger.IsLevelEnabled(ErrorLevel) { + entry := logger.newEntry() + entry.Errorf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Fatalf(format string, args ...interface{}) { + if logger.IsLevelEnabled(FatalLevel) { + entry := logger.newEntry() + entry.Fatalf(format, args...) + logger.releaseEntry(entry) + } + logger.Exit(1) +} + +func (logger *Logger) Panicf(format string, args ...interface{}) { + if logger.IsLevelEnabled(PanicLevel) { + entry := logger.newEntry() + entry.Panicf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Trace(args ...interface{}) { + if logger.IsLevelEnabled(TraceLevel) { + entry := logger.newEntry() + entry.Trace(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Debug(args ...interface{}) { + if logger.IsLevelEnabled(DebugLevel) { + entry := logger.newEntry() + entry.Debug(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Info(args ...interface{}) { + if logger.IsLevelEnabled(InfoLevel) { + entry := logger.newEntry() + entry.Info(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Print(args ...interface{}) { + entry := logger.newEntry() + entry.Info(args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warn(args ...interface{}) { + if logger.IsLevelEnabled(WarnLevel) { + entry := logger.newEntry() + entry.Warn(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Warning(args ...interface{}) { + if logger.IsLevelEnabled(WarnLevel) { + entry := logger.newEntry() + entry.Warn(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Error(args ...interface{}) { + if logger.IsLevelEnabled(ErrorLevel) { + entry := logger.newEntry() + entry.Error(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Fatal(args ...interface{}) { + if logger.IsLevelEnabled(FatalLevel) { + entry := logger.newEntry() + entry.Fatal(args...) + logger.releaseEntry(entry) + } + logger.Exit(1) +} + +func (logger *Logger) Panic(args ...interface{}) { + if logger.IsLevelEnabled(PanicLevel) { + entry := logger.newEntry() + entry.Panic(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Traceln(args ...interface{}) { + if logger.IsLevelEnabled(TraceLevel) { + entry := logger.newEntry() + entry.Traceln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Debugln(args ...interface{}) { + if logger.IsLevelEnabled(DebugLevel) { + entry := logger.newEntry() + entry.Debugln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Infoln(args ...interface{}) { + if logger.IsLevelEnabled(InfoLevel) { + entry := logger.newEntry() + entry.Infoln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Println(args ...interface{}) { + entry := logger.newEntry() + entry.Println(args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warnln(args ...interface{}) { + if logger.IsLevelEnabled(WarnLevel) { + entry := logger.newEntry() + entry.Warnln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Warningln(args ...interface{}) { + if logger.IsLevelEnabled(WarnLevel) { + entry := logger.newEntry() + entry.Warnln(args...) 
+ logger.releaseEntry(entry) + } +} + +func (logger *Logger) Errorln(args ...interface{}) { + if logger.IsLevelEnabled(ErrorLevel) { + entry := logger.newEntry() + entry.Errorln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Fatalln(args ...interface{}) { + if logger.IsLevelEnabled(FatalLevel) { + entry := logger.newEntry() + entry.Fatalln(args...) + logger.releaseEntry(entry) + } + logger.Exit(1) +} + +func (logger *Logger) Panicln(args ...interface{}) { + if logger.IsLevelEnabled(PanicLevel) { + entry := logger.newEntry() + entry.Panicln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Exit(code int) { + runHandlers() + if logger.ExitFunc == nil { + logger.ExitFunc = os.Exit + } + logger.ExitFunc(code) +} + +// When a file is opened in append mode, it's safe to +// write to it concurrently (for messages within 4k on Linux). +// In these cases the user can choose to disable the lock. +func (logger *Logger) SetNoLock() { + logger.mu.Disable() +} + +func (logger *Logger) level() Level { + return Level(atomic.LoadUint32((*uint32)(&logger.Level))) +} + +// SetLevel sets the logger level. +func (logger *Logger) SetLevel(level Level) { + atomic.StoreUint32((*uint32)(&logger.Level), uint32(level)) +} + +// GetLevel returns the logger level. +func (logger *Logger) GetLevel() Level { + return logger.level() +} + +// AddHook adds a hook to the logger hooks. +func (logger *Logger) AddHook(hook Hook) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Hooks.Add(hook) +} + +// IsLevelEnabled checks if the log level of the logger is greater than the level param +func (logger *Logger) IsLevelEnabled(level Level) bool { + return logger.level() >= level +} + +// SetFormatter sets the logger formatter. +func (logger *Logger) SetFormatter(formatter Formatter) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Formatter = formatter +} + +// SetOutput sets the logger output. +func (logger *Logger) SetOutput(output io.Writer) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Out = output +} + +func (logger *Logger) SetReportCaller(reportCaller bool) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.ReportCaller = reportCaller +} + +// ReplaceHooks replaces the logger hooks and returns the old ones +func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks { + logger.mu.Lock() + oldHooks := logger.Hooks + logger.Hooks = hooks + logger.mu.Unlock() + return oldHooks +} diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go new file mode 100644 index 00000000..4ef45186 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/logrus.go @@ -0,0 +1,178 @@ +package logrus + +import ( + "fmt" + "log" + "strings" +) + +// Fields type, used to pass to `WithFields`. +type Fields map[string]interface{} + +// Level type +type Level uint32 + +// Convert the Level to a string. E.g. PanicLevel becomes "panic". +func (level Level) String() string { + switch level { + case TraceLevel: + return "trace" + case DebugLevel: + return "debug" + case InfoLevel: + return "info" + case WarnLevel: + return "warning" + case ErrorLevel: + return "error" + case FatalLevel: + return "fatal" + case PanicLevel: + return "panic" + } + + return "unknown" +} + +// ParseLevel takes a string level and returns the Logrus log level constant.
+func ParseLevel(lvl string) (Level, error) { + switch strings.ToLower(lvl) { + case "panic": + return PanicLevel, nil + case "fatal": + return FatalLevel, nil + case "error": + return ErrorLevel, nil + case "warn", "warning": + return WarnLevel, nil + case "info": + return InfoLevel, nil + case "debug": + return DebugLevel, nil + case "trace": + return TraceLevel, nil + } + + var l Level + return l, fmt.Errorf("not a valid logrus Level: %q", lvl) +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (level *Level) UnmarshalText(text []byte) error { + l, err := ParseLevel(string(text)) + if err != nil { + return err + } + + *level = Level(l) + + return nil +} + +// AllLevels exposes all logging levels +var AllLevels = []Level{ + PanicLevel, + FatalLevel, + ErrorLevel, + WarnLevel, + InfoLevel, + DebugLevel, + TraceLevel, +} + +// These are the different logging levels. You can set the logging level to log +// on your instance of logger, obtained with `logrus.New()`. +const ( + // PanicLevel level, highest level of severity. Logs and then calls panic with the + // message passed to Debug, Info, ... + PanicLevel Level = iota + // FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the + // logging level is set to Panic. + FatalLevel + // ErrorLevel level. Logs. Used for errors that should definitely be noted. + // Commonly used for hooks to send errors to an error tracking service. + ErrorLevel + // WarnLevel level. Non-critical entries that deserve eyes. + WarnLevel + // InfoLevel level. General operational entries about what's going on inside the + // application. + InfoLevel + // DebugLevel level. Usually only enabled when debugging. Very verbose logging. + DebugLevel + // TraceLevel level. Designates finer-grained informational events than Debug. + TraceLevel +) + +// Won't compile if StdLogger can't be realized by a log.Logger +var ( + _ StdLogger = &log.Logger{} + _ StdLogger = &Entry{} + _ StdLogger = &Logger{} +) + +// StdLogger is what your logrus-enabled library should take; that way +// it'll accept a stdlib logger and a logrus logger. There's no standard +// interface; this is the closest we get, unfortunately.
+type StdLogger interface { + Print(...interface{}) + Printf(string, ...interface{}) + Println(...interface{}) + + Fatal(...interface{}) + Fatalf(string, ...interface{}) + Fatalln(...interface{}) + + Panic(...interface{}) + Panicf(string, ...interface{}) + Panicln(...interface{}) +} + +// The FieldLogger interface generalizes the Entry and Logger types +type FieldLogger interface { + WithField(key string, value interface{}) *Entry + WithFields(fields Fields) *Entry + WithError(err error) *Entry + + Debugf(format string, args ...interface{}) + Infof(format string, args ...interface{}) + Printf(format string, args ...interface{}) + Warnf(format string, args ...interface{}) + Warningf(format string, args ...interface{}) + Errorf(format string, args ...interface{}) + Fatalf(format string, args ...interface{}) + Panicf(format string, args ...interface{}) + + Debug(args ...interface{}) + Info(args ...interface{}) + Print(args ...interface{}) + Warn(args ...interface{}) + Warning(args ...interface{}) + Error(args ...interface{}) + Fatal(args ...interface{}) + Panic(args ...interface{}) + + Debugln(args ...interface{}) + Infoln(args ...interface{}) + Println(args ...interface{}) + Warnln(args ...interface{}) + Warningln(args ...interface{}) + Errorln(args ...interface{}) + Fatalln(args ...interface{}) + Panicln(args ...interface{}) + + // IsDebugEnabled() bool + // IsInfoEnabled() bool + // IsWarnEnabled() bool + // IsErrorEnabled() bool + // IsFatalEnabled() bool + // IsPanicEnabled() bool +} + +// Ext1FieldLogger (the first extension to FieldLogger) is superfluous; it is +// here for consistency. Do not use. Use Logger or Entry instead. +type Ext1FieldLogger interface { + FieldLogger + Tracef(format string, args ...interface{}) + Trace(args ...interface{}) + Traceln(args ...interface{}) +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go new file mode 100644 index 00000000..2403de98 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go @@ -0,0 +1,11 @@ +// +build appengine + +package logrus + +import ( + "io" +) + +func checkIfTerminal(w io.Writer) bool { + return true +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_js.go b/vendor/github.com/sirupsen/logrus/terminal_check_js.go new file mode 100644 index 00000000..0c209750 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_js.go @@ -0,0 +1,11 @@ +// +build js + +package logrus + +import ( + "io" +) + +func checkIfTerminal(w io.Writer) bool { + return false +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go new file mode 100644 index 00000000..cf309d6f --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go @@ -0,0 +1,19 @@ +// +build !appengine,!js,!windows + +package logrus + +import ( + "io" + "os" + + "golang.org/x/crypto/ssh/terminal" +) + +func checkIfTerminal(w io.Writer) bool { + switch v := w.(type) { + case *os.File: + return terminal.IsTerminal(int(v.Fd())) + default: + return false + } +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go new file mode 100644 index 00000000..3b9d2864 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go @@ -0,0 +1,20 @@ +// +build !appengine,!js,windows + +package logrus + +import ( + "io" + "os" + "syscall" +) + +func checkIfTerminal(w io.Writer) bool { + switch v := w.(type) { + case *os.File: + var mode uint32 + err := syscall.GetConsoleMode(syscall.Handle(v.Fd()), &mode) + return err == nil + default: + return false + } +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/sirupsen/logrus/terminal_notwindows.go new file mode 100644 index 00000000..3dbd2372 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_notwindows.go @@ -0,0 +1,8 @@ +// +build !windows + +package logrus + +import "io" + +func initTerminal(w io.Writer) { +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_windows.go b/vendor/github.com/sirupsen/logrus/terminal_windows.go new file mode 100644 index 00000000..b4ef5286 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_windows.go @@ -0,0 +1,18 @@ +// +build !appengine,!js,windows + +package logrus + +import ( + "io" + "os" + "syscall" + + sequences "github.com/konsorten/go-windows-terminal-sequences" +) + +func initTerminal(w io.Writer) { + switch v := w.(type) { + case *os.File: + sequences.EnableVirtualTerminalProcessing(syscall.Handle(v.Fd()), true) + } +} diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go new file mode 100644 index 00000000..49ec92f1 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/text_formatter.go @@ -0,0 +1,269 @@ +package logrus + +import ( + "bytes" + "fmt" + "os" + "sort" + "strings" + "sync" + "time" +) + +const ( + nocolor = 0 + red = 31 + green = 32 + yellow = 33 + blue = 36 + gray = 37 +) + +var ( + baseTimestamp time.Time + emptyFieldMap FieldMap +) + +func init() { + baseTimestamp = time.Now() +} + +// TextFormatter formats logs into text +type TextFormatter struct { + // Set to true to bypass checking for a TTY before outputting colors. + ForceColors bool + + // Force disabling colors. + DisableColors bool + + // Override coloring based on CLICOLOR and CLICOLOR_FORCE: https://bixense.com/clicolors/ + EnvironmentOverrideColors bool + + // Disable timestamp logging. Useful when output is redirected to a logging + // system that already adds timestamps. + DisableTimestamp bool + + // Enable logging the full timestamp when a TTY is attached instead of just + // the time passed since the beginning of execution. + FullTimestamp bool + + // TimestampFormat to use for display when a full timestamp is printed + TimestampFormat string + + // The fields are sorted by default for a consistent output. For applications + // that log extremely frequently and don't use the JSON formatter this may not + // be desired. + DisableSorting bool + + // The keys sorting function; when uninitialized it uses sort.Strings. + SortingFunc func([]string) + + // Disables the truncation of the level text to 4 characters. + DisableLevelTruncation bool + + // QuoteEmptyFields will wrap empty fields in quotes if true + QuoteEmptyFields bool + + // Whether the logger's out is to a terminal + isTerminal bool + + // FieldMap allows users to customize the names of keys for default fields.
+ // As an example: + // formatter := &TextFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyMsg: "@message"}} + FieldMap FieldMap + + terminalInitOnce sync.Once +} + +func (f *TextFormatter) init(entry *Entry) { + if entry.Logger != nil { + f.isTerminal = checkIfTerminal(entry.Logger.Out) + + if f.isTerminal { + initTerminal(entry.Logger.Out) + } + } +} + +func (f *TextFormatter) isColored() bool { + isColored := f.ForceColors || f.isTerminal + + if f.EnvironmentOverrideColors { + if force, ok := os.LookupEnv("CLICOLOR_FORCE"); ok && force != "0" { + isColored = true + } else if ok && force == "0" { + isColored = false + } else if os.Getenv("CLICOLOR") == "0" { + isColored = false + } + } + + return isColored && !f.DisableColors +} + +// Format renders a single log entry +func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { + prefixFieldClashes(entry.Data, f.FieldMap, entry.HasCaller()) + + keys := make([]string, 0, len(entry.Data)) + for k := range entry.Data { + keys = append(keys, k) + } + + fixedKeys := make([]string, 0, 4+len(entry.Data)) + if !f.DisableTimestamp { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime)) + } + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel)) + if entry.Message != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg)) + } + if entry.err != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError)) + } + if entry.HasCaller() { + fixedKeys = append(fixedKeys, + f.FieldMap.resolve(FieldKeyFunc), f.FieldMap.resolve(FieldKeyFile)) + } + + if !f.DisableSorting { + if f.SortingFunc == nil { + sort.Strings(keys) + fixedKeys = append(fixedKeys, keys...) + } else { + if !f.isColored() { + fixedKeys = append(fixedKeys, keys...) + f.SortingFunc(fixedKeys) + } else { + f.SortingFunc(keys) + } + } + } else { + fixedKeys = append(fixedKeys, keys...) 
+ } + + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + f.terminalInitOnce.Do(func() { f.init(entry) }) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = defaultTimestampFormat + } + if f.isColored() { + f.printColored(b, entry, keys, timestampFormat) + } else { + for _, key := range fixedKeys { + var value interface{} + switch { + case key == f.FieldMap.resolve(FieldKeyTime): + value = entry.Time.Format(timestampFormat) + case key == f.FieldMap.resolve(FieldKeyLevel): + value = entry.Level.String() + case key == f.FieldMap.resolve(FieldKeyMsg): + value = entry.Message + case key == f.FieldMap.resolve(FieldKeyLogrusError): + value = entry.err + case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller(): + value = entry.Caller.Function + case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller(): + value = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + default: + value = entry.Data[key] + } + f.appendKeyValue(b, key, value) + } + } + + b.WriteByte('\n') + return b.Bytes(), nil +} + +func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) { + var levelColor int + switch entry.Level { + case DebugLevel, TraceLevel: + levelColor = gray + case WarnLevel: + levelColor = yellow + case ErrorLevel, FatalLevel, PanicLevel: + levelColor = red + default: + levelColor = blue + } + + levelText := strings.ToUpper(entry.Level.String()) + if !f.DisableLevelTruncation { + levelText = levelText[0:4] + } + + // Remove a single newline if it already exists in the message to keep + // the behavior of logrus text_formatter the same as the stdlib log package + entry.Message = strings.TrimSuffix(entry.Message, "\n") + + caller := "" + + if entry.HasCaller() { + caller = fmt.Sprintf("%s:%d %s()", + entry.Caller.File, entry.Caller.Line, entry.Caller.Function) + } + + if f.DisableTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message) + } else if !f.FullTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message) + } else { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message) + } + for _, k := range keys { + v := entry.Data[k] + fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k) + f.appendValue(b, v) + } +} + +func (f *TextFormatter) needsQuoting(text string) bool { + if f.QuoteEmptyFields && len(text) == 0 { + return true + } + for _, ch := range text { + if !((ch >= 'a' && ch <= 'z') || + (ch >= 'A' && ch <= 'Z') || + (ch >= '0' && ch <= '9') || + ch == '-' || ch == '.' 
|| ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') { + return true + } + } + return false +} + +func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { + if b.Len() > 0 { + b.WriteByte(' ') + } + b.WriteString(key) + b.WriteByte('=') + f.appendValue(b, value) +} + +func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { + stringVal, ok := value.(string) + if !ok { + stringVal = fmt.Sprint(value) + } + + if !f.needsQuoting(stringVal) { + b.WriteString(stringVal) + } else { + b.WriteString(fmt.Sprintf("%q", stringVal)) + } +} diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go new file mode 100644 index 00000000..9e1f7513 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/writer.go @@ -0,0 +1,64 @@ +package logrus + +import ( + "bufio" + "io" + "runtime" +) + +func (logger *Logger) Writer() *io.PipeWriter { + return logger.WriterLevel(InfoLevel) +} + +func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { + return NewEntry(logger).WriterLevel(level) +} + +func (entry *Entry) Writer() *io.PipeWriter { + return entry.WriterLevel(InfoLevel) +} + +func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { + reader, writer := io.Pipe() + + var printFunc func(args ...interface{}) + + switch level { + case TraceLevel: + printFunc = entry.Trace + case DebugLevel: + printFunc = entry.Debug + case InfoLevel: + printFunc = entry.Info + case WarnLevel: + printFunc = entry.Warn + case ErrorLevel: + printFunc = entry.Error + case FatalLevel: + printFunc = entry.Fatal + case PanicLevel: + printFunc = entry.Panic + default: + printFunc = entry.Print + } + + go entry.writerScanner(reader, printFunc) + runtime.SetFinalizer(writer, writerFinalizer) + + return writer +} + +func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + printFunc(scanner.Text()) + } + if err := scanner.Err(); err != nil { + entry.Errorf("Error while reading from Writer: %s", err) + } + reader.Close() +} + +func writerFinalizer(writer *io.PipeWriter) { + writer.Close() +} diff --git a/vendor/k8s.io/test-infra/LICENSE b/vendor/k8s.io/test-infra/LICENSE new file mode 100644 index 00000000..deeaa08f --- /dev/null +++ b/vendor/k8s.io/test-infra/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 The Kubernetes Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
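The logrus release vendored above introduces the Trace level, opt-in caller reporting, and customizable default field keys via FieldMap. A minimal sketch of how those pieces combine from calling code (the formatter choice, field names, and message here are illustrative assumptions, not part of this change):

    package main

    import (
        log "github.com/sirupsen/logrus"
    )

    func main() {
        // Rename the default keys, per the FieldMap example in json_formatter.go.
        log.SetFormatter(&log.JSONFormatter{
            FieldMap: log.FieldMap{
                log.FieldKeyTime:  "@timestamp",
                log.FieldKeyLevel: "@level",
                log.FieldKeyMsg:   "@message",
            },
        })
        log.SetLevel(log.TraceLevel) // enable the new, most verbose level
        log.SetReportCaller(true)    // populate Entry.Caller via getCaller()

        log.WithField("component", "example").Trace("a trace entry with caller info")
    }

User-supplied fields that collide with the default keys are prefixed with "fields." by prefixFieldClashes rather than being silently dropped.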
diff --git a/vendor/k8s.io/test-infra/gubernator/github/secrets.py b/vendor/k8s.io/test-infra/gubernator/github/secrets.py new file mode 120000 index 00000000..6afb66b5 --- /dev/null +++ b/vendor/k8s.io/test-infra/gubernator/github/secrets.py @@ -0,0 +1 @@ +../secrets.py \ No newline at end of file diff --git a/vendor/k8s.io/test-infra/gubernator/static/octicons/LICENSE b/vendor/k8s.io/test-infra/gubernator/static/octicons/LICENSE new file mode 100644 index 00000000..4cf2020c --- /dev/null +++ b/vendor/k8s.io/test-infra/gubernator/static/octicons/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2012-2016 GitHub, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/k8s.io/test-infra/prow/cmd/tide/pr-authors.md b/vendor/k8s.io/test-infra/prow/cmd/tide/pr-authors.md new file mode 100644 index 00000000..8c73da5a --- /dev/null +++ b/vendor/k8s.io/test-infra/prow/cmd/tide/pr-authors.md @@ -0,0 +1,38 @@ +# PR Author's Guide to Tide + +If you just want to figure out how to get your PR to merge, this is the document for you! + +## Sources of Information + +1. The `tide` status context at the bottom of your PR. +The status either indicates that your PR is in the merge pool or explains why it is not in the merge pool. The 'Details' link will take you to either the Tide or PR dashboard. +![Tide Status Context](/prow/cmd/tide/status-context.png) +1. The PR dashboard at "`<deck-url>`/pr", where `<deck-url>` is something like "https://prow.k8s.io". +This dashboard shows a card for each of your PRs. Each card shows the current test results for the PR and the difference between the PR state and the merge criteria. [K8s PR dashboard](https://prow.k8s.io/pr) +1. The Tide dashboard at "`<deck-url>`/tide". +This dashboard shows the state of every merge pool so that you can see what Tide is currently doing and what position your PR has in the retest queue. [K8s Tide dashboard](https://prow.k8s.io/tide) + +## Get your PR merged by asking these questions + +#### "Is my PR in the merge pool?" + +If the `tide` status at the bottom of your PR is successful (green), it is in the merge pool. If it is pending (yellow), it is *not* in the merge pool. + +#### "Why is my PR not in the merge pool?" + +First, if you just made a change to the PR, give Tide a minute or two to react. Tide syncs periodically (1m period by default), so you shouldn't expect to see immediate reactions. + +To determine why your PR is not in the merge pool you have a couple of options. +1.
The `tide` status context at the bottom of your PR will describe at least one of the merge criteria that is not being met. The status has limited space for text, so only a few failing criteria can typically be listed. To see all merge criteria that are not being met, check out the PR dashboard. +1. The PR dashboard shows the difference between your PR's state and the merge criteria so that you can easily see all criteria that are not being met and address them in any order or in parallel. + + +#### "My PR is in the merge pool, what now?" + +Once your PR is in the merge pool, it is queued for merge and will be automatically retested before merge if necessary. So **typically your work is done!** +The one exception is if your PR fails a retest. This will cause the PR to be removed from the merge pool until it is fixed and passing all the required tests again. + +If you are eager for your PR to merge, you can view all the PRs in the pool on the Tide dashboard to see where your PR is in the queue. Because we give older PRs (lower numbers) priority, it is possible for a PR's position in the queue to increase. + +Note: Batches of PRs are given priority over individual PRs, so even if your PR is in the pool and has up-to-date tests, it won't merge while a batch is running: merging would update the base branch, making the batch jobs stale before they complete. +Similarly, whenever any other PR in the pool is merged, existing test results for your PR become stale and a retest becomes necessary before merge. However, your PR remains in the pool and will be automatically retested, so this doesn't require any action from you. diff --git a/vendor/k8s.io/test-infra/prow/crier/README.md b/vendor/k8s.io/test-infra/prow/crier/README.md new file mode 120000 index 00000000..64cac281 --- /dev/null +++ b/vendor/k8s.io/test-infra/prow/crier/README.md @@ -0,0 +1 @@ +../cmd/crier/README.md \ No newline at end of file diff --git a/vendor/k8s.io/test-infra/prow/logrusutil/logrusutil.go b/vendor/k8s.io/test-infra/prow/logrusutil/logrusutil.go new file mode 100644 index 00000000..25c0465f --- /dev/null +++ b/vendor/k8s.io/test-infra/prow/logrusutil/logrusutil.go @@ -0,0 +1,65 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +// Package logrusutil implements some helpers for using logrus +package logrusutil + +import ( + "github.com/sirupsen/logrus" +) + +// DefaultFieldsFormatter wraps another logrus.Formatter, injecting +// DefaultFields into each Format() call; fields already present on the +// entry are preserved if they have the same key. +type DefaultFieldsFormatter struct { + WrappedFormatter logrus.Formatter + DefaultFields logrus.Fields +} + +// NewDefaultFieldsFormatter returns a DefaultFieldsFormatter; +// if wrappedFormatter is nil, &logrus.JSONFormatter{} will be used instead. +func NewDefaultFieldsFormatter( + wrappedFormatter logrus.Formatter, defaultFields logrus.Fields, +) *DefaultFieldsFormatter { + res := &DefaultFieldsFormatter{ + WrappedFormatter: wrappedFormatter, + DefaultFields: defaultFields, + } + if res.WrappedFormatter == nil { + res.WrappedFormatter = &logrus.JSONFormatter{} + } + return res +} + +// Format implements logrus.Formatter's Format. We allocate a new Fields +// map in order to not modify the caller's Entry, as that is not a +// thread-safe operation. +func (d *DefaultFieldsFormatter) Format(entry *logrus.Entry) ([]byte, error) { + data := make(logrus.Fields, len(entry.Data)+len(d.DefaultFields)) + for k, v := range d.DefaultFields { + data[k] = v + } + for k, v := range entry.Data { + data[k] = v + } + return d.WrappedFormatter.Format(&logrus.Entry{ + Logger: entry.Logger, + Data: data, + Time: entry.Time, + Level: entry.Level, + Message: entry.Message, + }) +} diff --git a/vendor/k8s.io/test-infra/prow/tide/README.md b/vendor/k8s.io/test-infra/prow/tide/README.md new file mode 120000 index 00000000..86a6ab32 --- /dev/null +++ b/vendor/k8s.io/test-infra/prow/tide/README.md @@ -0,0 +1 @@ +../cmd/tide/README.md \ No newline at end of file
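
For context, here is a minimal sketch of how `DefaultFieldsFormatter` is typically wired into a binary's `main`. Only `NewDefaultFieldsFormatter` and `logrus.SetFormatter` come from the code above; the `component` value and the sample log call are illustrative assumptions, not part of this change:

```go
package main

import (
	"github.com/sirupsen/logrus"
	"k8s.io/test-infra/prow/logrusutil"
)

func main() {
	// Wrap the default JSON formatter so that every log entry carries a
	// "component" field. Passing nil for the wrapped formatter falls back
	// to &logrus.JSONFormatter{}, per NewDefaultFieldsFormatter above.
	logrus.SetFormatter(
		logrusutil.NewDefaultFieldsFormatter(nil, logrus.Fields{
			"component": "example", // illustrative value
		}),
	)

	// Entry-level fields with the same key take precedence over the
	// injected defaults, since Format copies DefaultFields first and
	// entry.Data second.
	logrus.WithField("step", "demo").Info("formatter configured")
}
```

Because the wrapper delegates to any `logrus.Formatter`, the default-field injection composes with whatever output format the binary already uses rather than forcing JSON.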