diff --git a/.codeclimate.yml b/.codeclimate.yml index fa80364343..cf68a37317 100644 --- a/.codeclimate.yml +++ b/.codeclimate.yml @@ -3,9 +3,15 @@ engines: enabled: true govet: enabled: true + golint: + enabled: true + fixme: + enabled: true ratings: paths: - "**.go" exclude_paths: - vendor/ - internal/gps/_testdata + - cmd/dep/testdata + - testdata diff --git a/.travis.yml b/.travis.yml index b08779e41c..079b64265e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,42 +3,53 @@ language: go sudo: false notifications: email: false -matrix: +jobs: include: - - os: linux - go: 1.7.x - - os: linux + - stage: test + install: + - go get -u honnef.co/go/tools/cmd/{gosimple,staticcheck} + - npm install -g codeclimate-test-reporter + env: + - DEPTESTBYPASS501=1 + os: linux go: 1.8.x - - os: linux + script: + - go build -v ./cmd/dep + - PKGS=$(go list ./... | grep -v /vendor/) + - go vet $PKGS + - staticcheck $PKGS + - gosimple $PKGS + - ./hack/validate-vendor.bash + - go build ./hack/licenseok + - find . -path ./vendor -prune -o -type f -name "*.go" -printf '%P\n' | xargs ./licenseok + - set -e; for pkg in $PKGS; do go test -race -coverprofile=profile.out -covermode=atomic $pkg; if [[ -f profile.out ]]; then cat profile.out >> coverage.txt; rm profile.out; fi; done + after_success: + - codeclimate-test-reporter < coverage.txt + # YAML alias, for settings shared across the simpler builds + - &simple-test + go: 1.7.x + stage: test + install: skip + env: + - DEPTESTBYPASS501=1 + script: go test -race $(go list ./... | grep -v vendor) + - <<: *simple-test go: tip - - os: osx + - <<: *simple-test + os: osx go: 1.8.x -env: - # Flip bit to bypass tests - see dep#501 for more information - - DEPTESTBYPASS501=1 -install: - - echo "This is an override of the default install deps step in travis." -before_script: - # OSX as of El Capitan sets an exit trap that interacts poorly with our - # set -e below. So, unset the trap. - # Related: https://superuser.com/questions/1044130/why-am-i-having-how-can-i-fix-this-error-shell-session-update-command-not-f - - if [[ "$(go env GOHOSTOS)" == "darwin" ]]; then trap EXIT; fi - - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew update; fi - - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew install bzr; fi - - PKGS=$(go list ./... | grep -v /vendor/ | grep -v _testdata/ ) - - go get -v honnef.co/go/tools/cmd/{gosimple,staticcheck} - - npm install -g codeclimate-test-reporter -script: - - go build -v ./cmd/dep - - go vet $PKGS - - staticcheck $PKGS - #- ./hack/validate-gofmt.bash - - ./hack/validate-vendor.bash - - gosimple $PKGS - #- go test -race $PKGS - - go build ./hack/licenseok - - set -e; for pkg in $PKGS; do go test -race -coverprofile=profile.out -covermode=atomic $pkg; if [[ -f profile.out ]]; then cat profile.out >> coverage.txt; rm profile.out; fi; done - - find . -path ./vendor -prune -o -type f -name "*.go" -printf '%P\n' | xargs ./licenseok - - ./hack/validate-vendor.bash -after_success: - - codeclimate-test-reporter < coverage.txt + install: + # brew takes horribly long to update itself despite the above caching + # attempt; only bzr install if it's not on the $PATH + - test $(which bzr) || brew install bzr + env: + - HOMEBREW_NO_AUTO_UPDATE=1 + - DEPTESTBYPASS501=1 + script: + # OSX as of El Capitan sets an exit trap that interacts poorly with how + # travis seems to spawn these shells; if set -e is set, then it can cause + # build failures. We're not doing that here, but retain the trap statement + # for future safety. 
+ # Related: https://superuser.com/questions/1044130/why-am-i-having-how-can-i-fix-this-error-shell-session-update-command-not-f + - trap EXIT + - go test -race $(go list ./... | grep -v vendor) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 6194bad69f..09a4bacaa4 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -26,6 +26,12 @@ The gophers there will answer or ask you to file an issue if you've tripped over Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) before sending patches. +The +[help-wanted](https://github.com/golang/dep/issues?q=is%3Aissue+is%3Aopen+label%3Ahelp-wanted) +label highlights issues that are well-suited for folks to jump in on. The +[good-first-pr](https://github.com/golang/dep/issues?q=is%3Aissue+is%3Aopen+label%3Agood-first-pr) +label further identifies issues that are particularly well-sized for newcomers. + Unless otherwise noted, the Dep source files are distributed under the BSD-style license found in the LICENSE file. diff --git a/FAQ.md b/FAQ.md index a4d69a57ed..71db50d4d8 100644 --- a/FAQ.md +++ b/FAQ.md @@ -7,7 +7,7 @@ Please contribute to the FAQ! Found an explanation in an issue or pull request h Summarize the question and quote the reply, linking back to the original comment. * [What is the difference between Gopkg.toml (the "manifest") and Gopkg.lock (the "lock")?](#what-is-the-difference-between-gopkgtoml-the-manifest-and-gopkglock-the-lock) -* [When should I use dependencies, overrides or required in the manifest?](#when-should-i-use-dependencies-overrides-required-or-ignored-in-the-manifest) +* [When should I use `constraint`, `override`, `required`, or `ignored` in `Gopkg.toml`?](#when-should-i-use-constraint-override-required-or-ignored-in-gopkgtoml) * [What is a direct or transitive dependency?](#what-is-a-direct-or-transitive-dependency) * [Should I commit my vendor directory?](#should-i-commit-my-vendor-directory) * [Why is it `dep ensure` instead of `dep install`?](#why-is-it-dep-ensure-instead-of-dep-install) @@ -18,6 +18,12 @@ Summarize the question and quote the reply, linking back to the original comment * [Can I put the manifest and lock in the vendor directory?](#can-i-put-the-manifest-and-lock-in-the-vendor-directory) * [Why did dep use a different revision for package X instead of the revision in the lock file?](#why-did-dep-use-a-different-revision-for-package-x-instead-of-the-revision-in-the-lock-file) * [Why is `dep` slow?](#why-is-dep-slow) +* [How does `dep` handle symbolic links?](#how-does-dep-handle-symbolic-links) +* [How do I roll releases that `dep` will be able to use?](#how-do-i-roll-releases-that-dep-will-be-able-to-use) +* [How does `dep` decide what version of a dependency to use?](#how-does-dep-decide-what-version-of-a-dependency-to-use) +* [What semver version should I use?](#what-semver-version-should-i-use) +* [Is it OK to make backwards-incompatible changes now?](#is-it-ok-to-make-backwards-incompatible-changes-now) +* [My dependers don't use `dep` yet. What should I do?](#my-dependers-dont-use-dep-yet-what-should-i-do) ## What is the difference between Gopkg.toml (the "manifest") and Gopkg.lock (the "lock")? @@ -26,10 +32,10 @@ Summarize the question and quote the reply, linking back to the original comment > This flexibility is important because it allows us to provide easy commands (e.g.
`dep ensure -update`) that can manage an update process for you, within the constraints you specify, AND because it allows your project, when imported by someone else, to collaboratively specify the constraints for your own dependencies. -[@sdboyer in #281](https://github.com/golang/dep/issues/281#issuecomment-284118314) -## When should I use dependencies, overrides, required, or ignored in the manifest? +## When should I use `constraint`, `override`, `required`, or `ignored` in `Gopkg.toml`? -* Use `dependencies` to constrain a [direct dependency](#what-is-a-direct-or-transitive-dependency) to a specific branch, version range, revision, or specify an alternate source such as a fork. -* Use `overrides` to constrain a [transitive dependency](#what-is-a-direct-or-transitive-dependency). See [How do I constrain a transitive dependency's version?](#how-do-i-constrain-a-transitive-dependencys-version) for more details on how overrides differ from dependencies. Overrides should be used cautiously, sparingly, and temporarily. +* Use `constraint` to constrain a [direct dependency](#what-is-a-direct-or-transitive-dependency) to a specific branch, version range, revision, or specify an alternate source such as a fork. +* Use `override` to constrain a [transitive dependency](#what-is-a-direct-or-transitive-dependency). See [How do I constrain a transitive dependency's version?](#how-do-i-constrain-a-transitive-dependencys-version) for more details on how overrides differ from dependencies. Overrides should be used cautiously, sparingly, and temporarily. * Use `required` to explicitly add a dependency that is not imported directly or transitively, for example a development package used for code generation. * Use `ignored` to ignore a package and any of that package's unique dependencies. @@ -113,7 +119,7 @@ behave differently: Overrides are also discussed with some visuals in [the gps docs](https://github.com/sdboyer/gps/wiki/gps-for-Implementors#overrides). ## `dep` deleted my files in the vendor directory! -First, sorry! 😞 We hope you were able to recover your files... +If you just ran `dep init`, there should be a copy of your original vendor directory named `_vendor-TIMESTAMP` in your project root. The other commands do not make a backup before modifying the vendor directory. > dep assumes complete control of vendor/, and may indeed blow things away if it feels like it. -[@peterbourgon in #206](https://github.com/golang/dep/issues/206#issuecomment-277139419) @@ -178,4 +184,118 @@ gateway to all of these improvements. There's another major performance issue that's much harder - the process of picking versions itself is an NP-complete problem in `dep`'s current design. This is a much trickier problem 😜 +## How does `dep` handle symbolic links? +> because we're not crazy people who delight in inviting chaos into our lives, we need to work within one GOPATH at a time. -[@sdboyer in #247](https://github.com/golang/dep/pull/247#issuecomment-284181879) + +Out of convenience, one might create a symlink to a directory within their `GOPATH`, e.g. `ln -s ~/go/src/github.com/golang/dep dep`. When `dep` is invoked it will resolve the current working directory accordingly: + +- If the cwd is a symlink outside a `GOPATH` and links to a directory within a `GOPATH`, or vice versa, `dep` chooses whichever path is within the `GOPATH`. If neither path is within a `GOPATH`, `dep` produces an error.
+- If both the cwd and resolved path are in the same `GOPATH`, an error is thrown since the user's intentions and expectations can't be accurately deduced. +- If the symlink is within a `GOPATH` and the real path is within a *different* `GOPATH` - an error is thrown. + +This is the only symbolic link support that `dep` really intends to provide. In keeping with the general practices of the `go` tool, `dep` tends to either ignore symlinks (when walking) or copy the symlink itself, depending on the filesystem operation being performed. + +## How do I roll releases that `dep` will be able to use? + +In short: make sure you've committed your `Gopkg.toml` and `Gopkg.lock`, then +just create a tag in your version control system and push it to the canonical +location. `dep` is designed to work automatically with this sort of metadata +from `git`, `bzr`, and `hg`. + +It's strongly preferred that you use [semver](http://semver.org)-compliant tag +names. We hope to develop documentation soon that describes this more precisely, +but in the meantime, the [npm](https://docs.npmjs.com/misc/semver) docs match +our patterns pretty well. + +## How does `dep` decide what version of a dependency to use? + +The full algorithm is complex, but the most important thing to understand is +that `dep` tries versions in a [certain +order](https://godoc.org/github.com/golang/dep/internal/gps#SortForUpgrade), +checking to see if a version is acceptable according to specified constraints. + +- All semver versions come first, and sort mostly according to the semver 2.0 + spec, with one exception: + - Semver versions with a prerelease are sorted after *all* non-prerelease + semver. Within this subset they are sorted first by their numerical + component, then lexicographically by their prerelease version. +- The default branch(es) are next; the semantics of what "default branch" means + are specific to the underlying source type, but this is generally what you'd + get from a `go get`. +- All other branches come next, sorted lexicographically. +- All non-semver versions (tags) are next, sorted lexicographically. +- Revisions, if any, are last, sorted lexicographically. Revisions do not + typically appear in version lists, so the only invariant we maintain is + determinism - deeper semantics, like chronology or topology, do not matter. + +So, given a slice of the following versions: + +- Branch: `master` `devel` +- Semver tags: `v1.0.0` `v1.1.0` `v1.1.0-alpha1` +- Non-semver tags: `footag` +- Revision: `f6e74e8d` + +Sorting for upgrade will result in the following slice. + +`[v1.1.0 v1.0.0 v1.1.0-alpha1 footag devel master f6e74e8d]` + +There are a number of factors that can eliminate a version from consideration, +the simplest of which is that it doesn't match a constraint. But if you're +trying to figure out why `dep` is doing what it does, understanding that its +basic action is to attempt versions in this order should help you to reason +about what's going on. + +## What semver version should I use? + +This can be a nuanced question, and the community is going to have to work out +some accepted standards for how semver should be applied to Go projects. At the +highest level, though, these are the rules: + +* Below `v1.0.0`, anything goes. Use these releases to figure out what you want + your API to be. +* Above `v1.0.0`, the general Go best practices continue to apply - don't make + backwards-incompatible changes - exported identifiers can be added to, but + not changed or removed.
+* If you must make a backwards-incompatible change, then bump the major version. + +It's important to note that having a `v1.0.0` does not preclude you from having +alpha/beta/etc releases. The semver spec allows for [prerelease +versions](http://semver.org/#spec-item-9), and `dep` is careful to _not_ allow +such versions unless `Gopkg.toml` contains a range constraint that explicitly +includes prereleases: if there exists a version `v1.0.1-alpha4`, then the +constraint `>=1.0.0` will not match it, but `>=1.0.1-alpha1` will. + +Some work has been done towards [a +tool](https://github.com/bradleyfalzon/apicompat) that will analyze and compare +your code with the last release, and suggest the next version you should use. + +## Is it OK to make backwards-incompatible changes now? + +Yes. But. + +`dep` will make it possible for the Go ecosystem to handle +backwards-incompatible changes more gracefully. However, `dep` is not some +magical panacea. Version and dependency management is hard, and dependency hell +is real. The longstanding community wisdom about avoiding breaking changes +remains important. Any `v1.0.0` release should be accompanied by a plan for how +to avoid future breaking API changes. + +One good strategy may be to add to your API instead of changing it, deprecating +old versions as you progress. Then, when the time is right, you can roll a new +major version and clean out a bunch of deprecated symbols all at once. + +Note that providing an incremental migration path across breaking changes (i.e., +shims) is tricky, and something we [don't have a good answer for +yet](https://groups.google.com/forum/#!topic/go-package-management/fp2uBMf6kq4). + +## My dependers don't use `dep` yet. What should I do? + +For the most part, you needn't do anything differently. + +The only possible issue is if your project is ever consumed as a library. If +so, then you may want to be wary about committing your `vendor/` directory, as +it can [cause +problems](https://groups.google.com/d/msg/golang-nuts/AnMr9NL6dtc/UnyUUKcMCAAJ). +If your dependers are using `dep`, this is not a concern, as `dep` takes care of +stripping out nested `vendor` directories. diff --git a/Gopkg.lock b/Gopkg.lock index adf97a6dad..462c222384 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1,10 +1,11 @@ -memo = "932b7b1663f6eecccb1fada1d3670ae24cd8aa7c8b61e3b224edfefebe25954e" +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+ [[projects]] branch = "2.x" name = "github.com/Masterminds/semver" packages = ["."] - revision = "94ad6eaf8457cf85a68c9b53fa42e9b1b8683783" + revision = "139cc0982c53f1367af5636b12b7643cd03757fc" [[projects]] name = "github.com/Masterminds/vcs" @@ -18,6 +19,12 @@ memo = "932b7b1663f6eecccb1fada1d3670ae24cd8aa7c8b61e3b224edfefebe25954e" packages = ["."] revision = "4239b77079c7b5d1243b7b4736304ce8ddb6f0f2" +[[projects]] + branch = "v2" + name = "github.com/go-yaml/yaml" + packages = ["."] + revision = "cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b" + [[projects]] name = "github.com/pelletier/go-buffruneio" packages = ["."] @@ -41,3 +48,10 @@ memo = "932b7b1663f6eecccb1fada1d3670ae24cd8aa7c8b61e3b224edfefebe25954e" name = "github.com/sdboyer/constext" packages = ["."] revision = "836a144573533ea4da4e6929c235fd348aed1c80" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "1096dfb111cbe243aa24ea824ea3e1db7bb178c01d5565107c6d9290d225d722" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index 26c6b5de3f..5cd77552db 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -1,17 +1,19 @@ -required = ["github.com/Masterminds/semver"] - -[[dependencies]] +[[constraint]] branch = "2.x" name = "github.com/Masterminds/semver" -[[dependencies]] +[[constraint]] name = "github.com/Masterminds/vcs" - version = "^1.11.0" + version = "1.11.0" + +[[constraint]] + branch = "v2" + name = "github.com/go-yaml/yaml" -[[dependencies]] +[[constraint]] branch = "master" name = "github.com/pelletier/go-toml" -[[dependencies]] +[[constraint]] name = "github.com/pkg/errors" - version = ">=0.8.0, <1.0.0" + version = "0.8.0" diff --git a/README.md b/README.md index b03c371bbc..a7330258a7 100644 --- a/README.md +++ b/README.md @@ -9,10 +9,15 @@ Dep is a prototype dependency management tool. It requires Go 1.7 or newer to compile. ## Current status **Alpha**. -Functionality is known to be broken, missing or incomplete. Command and file format changes are still planned. +Functionality is known to be broken, missing or incomplete. Changes are planned +to the CLI commands soon. *It would be unwise to write scripts atop `dep` before then.* The repository is open to solicit feedback and contributions from the community. Please see below for feedback and contribution guidelines. +`Gopkg.toml` and `Gopkg.lock` have reached a stable structure, and it is safe to +commit them in your projects. We plan to add more to these files, but we +guarantee these changes will be backwards-compatible. + ## Context - [The Saga of Go Dependency Management](https://blog.gopheracademy.com/advent-2016/saga-go-dependency-management/) @@ -44,11 +49,7 @@ To update a dependency to a new version, you might run $ dep ensure github.com/pkg/errors@^0.8.0 ``` -See the help text for much more detailed usage instructions. - -Note that **the manifest and lock file formats are not finalized**, and will likely change before the tool is released. We make no compatibility guarantees for the time being. Please don't commit any code or files created with the tool. +See the help text for more detailed usage instructions.
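A note on the constraint strings above: the Gopkg.toml hunk drops the explicit caret from `version = "^1.11.0"` because, with the `gps.NewSemverConstraintIC` helper this diff switches to in `cmd/dep/ensure.go`, a bare semver version is read with an implied caret (the glide importer tests below expect `"1.0.0"` to become `^1.0.0`). A minimal sketch of what a caret range admits, assuming the v1-style `github.com/Masterminds/semver` API (`NewConstraint`/`NewVersion`/`Check`) rather than the 2.x branch this lock actually pins:

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver"
)

func main() {
	// "^1.11.0" means ">=1.11.0, <2.0.0": any release up to, but not
	// including, the next major version satisfies the constraint.
	c, err := semver.NewConstraint("^1.11.0")
	if err != nil {
		panic(err)
	}
	for _, s := range []string{"1.11.0", "1.12.4", "2.0.0"} {
		v, err := semver.NewVersion(s)
		if err != nil {
			panic(err)
		}
		fmt.Printf("^1.11.0 admits %s: %v\n", s, c.Check(v)) // true, true, false
	}
}
```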
## Feedback diff --git a/analyzer.go b/analyzer.go index a2076691d9..a0a83a8036 100644 --- a/analyzer.go +++ b/analyzer.go @@ -8,20 +8,25 @@ import ( "os" "path/filepath" + "github.com/golang/dep/internal/fs" "github.com/golang/dep/internal/gps" ) type Analyzer struct{} -func (a Analyzer) DeriveManifestAndLock(path string, n gps.ProjectRoot) (gps.Manifest, gps.Lock, error) { - // TODO: If we decide to support other tools manifest, this is where we would need - // to add that support. +// HasDepMetadata determines if a dep manifest exists at the specified path. +func (a Analyzer) HasDepMetadata(path string) bool { mf := filepath.Join(path, ManifestName) - if fileOK, err := IsRegular(mf); err != nil || !fileOK { - // Do not return an error, when does not exist. + fileOK, err := fs.IsRegular(mf) + return err == nil && fileOK +} + +func (a Analyzer) DeriveManifestAndLock(path string, n gps.ProjectRoot) (gps.Manifest, gps.Lock, error) { + if !a.HasDepMetadata(path) { return nil, nil, nil } - f, err := os.Open(mf) + + f, err := os.Open(filepath.Join(path, ManifestName)) if err != nil { return nil, nil, err } diff --git a/appveyor.yml b/appveyor.yml index ffd4e082be..b69fc971ce 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -34,4 +34,4 @@ deploy: false test_script: - go build github.com/golang/dep/cmd/dep - - for /f "" %%G in ('go list github.com/golang/dep/... ^| find /i /v "/vendor/"') do @go test %%G + - for /f "" %%G in ('go list github.com/golang/dep/... ^| find /i /v "/vendor/"') do ( go test %%G & IF ERRORLEVEL == 1 EXIT 1) diff --git a/cmd/dep/ensure.go b/cmd/dep/ensure.go index d325a0d0f7..9e20e90bcc 100644 --- a/cmd/dep/ensure.go +++ b/cmd/dep/ensure.go @@ -16,6 +16,7 @@ import ( "github.com/golang/dep" "github.com/golang/dep/internal/gps" + "github.com/golang/dep/internal/gps/paths" "github.com/golang/dep/internal/gps/pkgtree" "github.com/pkg/errors" ) @@ -149,7 +150,7 @@ func (cmd *ensureCommand) Run(ctx *dep.Ctx, args []string) error { } } - p, err := ctx.LoadProject("") + p, err := ctx.LoadProject() if err != nil { return err } @@ -247,7 +248,7 @@ func (cmd *ensureCommand) runDefault(ctx *dep.Ctx, args []string, p *dep.Project return errors.Wrap(err, "ensure Solve()") } - sw, err := dep.NewSafeWriter(nil, p.Lock, dep.LockFromInterface(solution), dep.VendorOnChanged) + sw, err := dep.NewSafeWriter(nil, p.Lock, dep.LockFromSolution(solution), dep.VendorOnChanged) if err != nil { return err } @@ -327,7 +328,7 @@ func (cmd *ensureCommand) runUpdate(ctx *dep.Ctx, args []string, p *dep.Project, return errors.Wrap(err, "ensure Solve()") } - sw, err := dep.NewSafeWriter(nil, p.Lock, dep.LockFromInterface(solution), dep.VendorOnChanged) + sw, err := dep.NewSafeWriter(nil, p.Lock, dep.LockFromSolution(solution), dep.VendorOnChanged) if err != nil { return err } @@ -338,7 +339,7 @@ func (cmd *ensureCommand) runUpdate(ctx *dep.Ctx, args []string, p *dep.Project, return sw.PrintPreparedActions(ctx.Loggers.Out) } - return errors.Wrap(sw.Write(p.AbsRoot, sm, true), "grouped write of manifest, lock and vendor") + return errors.Wrap(sw.Write(p.AbsRoot, sm, false), "grouped write of manifest, lock and vendor") } func (cmd *ensureCommand) runAdd(ctx *dep.Ctx, args []string, p *dep.Project, sm gps.SourceManager, params gps.SolveParameters) error { @@ -376,7 +377,7 @@ func (cmd *ensureCommand) runAdd(ctx *dep.Ctx, args []string, p *dep.Project, sm exmap := make(map[string]bool) exrmap := make(map[gps.ProjectRoot]bool) - for _, ex := range append(rm.Flatten(false), p.Manifest.Required...) 
{ + for _, ex := range append(rm.FlattenFn(paths.IsStandardImportPath), p.Manifest.Required...) { exmap[ex] = true root, err := sm.DeduceProjectRoot(ex) if err != nil { @@ -432,7 +433,7 @@ func (cmd *ensureCommand) runAdd(ctx *dep.Ctx, args []string, p *dep.Project, sm reqlist = append(reqlist, arg) p.Manifest.Required = append(p.Manifest.Required, arg) } else { - p.Manifest.Dependencies[pc.Ident.ProjectRoot] = gps.ProjectProperties{ + p.Manifest.Constraints[pc.Ident.ProjectRoot] = gps.ProjectProperties{ Source: pc.Ident.Source, Constraint: pc.Constraint, } @@ -449,7 +450,7 @@ func (cmd *ensureCommand) runAdd(ctx *dep.Ctx, args []string, p *dep.Project, sm } } } else { - p.Manifest.Dependencies[pc.Ident.ProjectRoot] = gps.ProjectProperties{ + p.Manifest.Constraints[pc.Ident.ProjectRoot] = gps.ProjectProperties{ Source: pc.Ident.Source, Constraint: pc.Constraint, } @@ -472,7 +473,7 @@ func (cmd *ensureCommand) runAdd(ctx *dep.Ctx, args []string, p *dep.Project, sm return errors.Wrap(err, "ensure Solve()") } - sw, err := dep.NewSafeWriter(nil, p.Lock, dep.LockFromInterface(solution), dep.VendorOnChanged) + sw, err := dep.NewSafeWriter(nil, p.Lock, dep.LockFromSolution(solution), dep.VendorOnChanged) // TODO(sdboyer) special handling for warning cases as described in spec - // e.g., named projects did not upgrade even though newer versions were // available. @@ -500,14 +501,14 @@ func applyEnsureArgs(logger *log.Logger, args []string, overrides stringSlice, p // // TODO(sdboyer): for this case - or just in general - do we want to // add project args to the requires list temporarily for this run? - if _, has := p.Manifest.Dependencies[pc.Ident.ProjectRoot]; !has { + if _, has := p.Manifest.Constraints[pc.Ident.ProjectRoot]; !has { logger.Printf("dep: No constraint or alternate source specified for %q, omitting from manifest\n", pc.Ident.ProjectRoot) } // If it's already in the manifest, no need to log continue } - p.Manifest.Dependencies[pc.Ident.ProjectRoot] = gps.ProjectProperties{ + p.Manifest.Constraints[pc.Ident.ProjectRoot] = gps.ProjectProperties{ Source: pc.Ident.Source, Constraint: pc.Constraint, } @@ -559,76 +560,70 @@ func (s *stringSlice) Set(value string) error { func getProjectConstraint(arg string, sm gps.SourceManager) (gps.ProjectConstraint, error) { // TODO(sdboyer) this func needs to be broken out, now that we admit // different info in specs - constraint := gps.ProjectConstraint{ + emptyPC := gps.ProjectConstraint{ Constraint: gps.Any(), // default to any; avoids panics later } // try to split on '@' - var versionStr string + // When there is no `@`, use any version + versionStr := "*" atIndex := strings.Index(arg, "@") if atIndex > 0 { parts := strings.SplitN(arg, "@", 2) arg = parts[0] versionStr = parts[1] - constraint.Constraint = deduceConstraint(parts[1]) } - // TODO: What if there is no @, assume default branch (which may not be master) ? + // TODO: if we decide to keep equals..... 
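// Illustrative, hypothetical examples of the spec syntax handled here:
// "github.com/pkg/errors@v0.8.1" pins a version via the "@" suffix, while
// "github.com/pkg/errors:https://github.com/myfork/errors.git" names an
// alternate source after the ":".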
// split on colon if there is a network location + var source string colonIndex := strings.Index(arg, ":") if colonIndex > 0 { parts := strings.SplitN(arg, ":", 2) arg = parts[0] - constraint.Ident.Source = parts[1] + source = parts[1] } pr, err := sm.DeduceProjectRoot(arg) if err != nil { - return constraint, errors.Wrapf(err, "could not infer project root from dependency path: %s", arg) // this should go through to the user + return emptyPC, errors.Wrapf(err, "could not infer project root from dependency path: %s", arg) // this should go through to the user } if string(pr) != arg { - return constraint, errors.Errorf("dependency path %s is not a project root, try %s instead", arg, pr) + return emptyPC, errors.Errorf("dependency path %s is not a project root, try %s instead", arg, pr) } - constraint.Ident.ProjectRoot = gps.ProjectRoot(arg) - - // Below we are checking if the constraint we deduced was valid. - if v, ok := constraint.Constraint.(gps.Version); ok && versionStr != "" { - if v.Type() == gps.IsVersion { - // we hit the fall through case in deduce constraint, let's call out to network - // and get the package's versions - versions, err := sm.ListVersions(constraint.Ident) - if err != nil { - return constraint, errors.Wrapf(err, "list versions for %s", arg) // means repo does not exist - } + pi := gps.ProjectIdentifier{ProjectRoot: pr, Source: source} + c, err := deduceConstraint(versionStr, pi, sm) + if err != nil { + return emptyPC, err + } + return gps.ProjectConstraint{Ident: pi, Constraint: c}, nil +} - var found bool - for _, version := range versions { - if versionStr == version.String() { - found = true - constraint.Constraint = version.Unpair() - break - } - } +// deduceConstraint tries to puzzle out what kind of version is given in a string - +// semver, a revision, or as a fallback, a plain tag +func deduceConstraint(s string, pi gps.ProjectIdentifier, sm gps.SourceManager) (gps.Constraint, error) { + if s == "" { + // Find the default branch + versions, err := sm.ListVersions(pi) + if err != nil { + return nil, errors.Wrapf(err, "list versions for %s(%s)", pi.ProjectRoot, pi.Source) // means repo does not exist + } - if !found { - return constraint, errors.Errorf("%s is not a valid version for the package %s", versionStr, arg) + gps.SortPairedForUpgrade(versions) + for _, v := range versions { + if v.Type() == gps.IsBranch { + return v.Unpair(), nil } } } - return constraint, nil -} - -// deduceConstraint tries to puzzle out what kind of version is given in a string - -// semver, a revision, or as a fallback, a plain tag -func deduceConstraint(s string) gps.Constraint { // always semver if we can - c, err := gps.NewSemverConstraint(s) + c, err := gps.NewSemverConstraintIC(s) if err == nil { - return c + return c, nil } slen := len(s) @@ -637,7 +632,7 @@ func deduceConstraint(s string) gps.Constraint { // Whether or not it's intended to be a SHA1 digest, this is a // valid byte sequence for that, so go with Revision. 
This // covers git and hg - return gps.Revision(s) + return gps.Revision(s), nil } } // Next, try for bzr, which has a three-component GUID separated by @@ -648,20 +643,29 @@ func deduceConstraint(s string) gps.Constraint { i3 := strings.LastIndex(s, "-") // Skip if - is last char, otherwise this would panic on bounds err if slen == i3+1 { - return gps.NewVersion(s) + return gps.NewVersion(s), nil } i2 := strings.LastIndex(s[:i3], "-") if _, err = strconv.ParseUint(s[i2+1:i3], 10, 64); err == nil { // Getting this far means it'd pretty much be nuts if it's not a // bzr rev, so don't bother parsing the email. - return gps.Revision(s) + return gps.Revision(s), nil } } - // If not a plain SHA1 or bzr custom GUID, assume a plain version. - // TODO: if there is amgibuity here, then prompt the user? - return gps.NewVersion(s) + // call out to network and get the package's versions + versions, err := sm.ListVersions(pi) + if err != nil { + return nil, errors.Wrapf(err, "list versions for %s(%s)", pi.ProjectRoot, pi.Source) // means repo does not exist + } + + for _, version := range versions { + if s == version.String() { + return version.Unpair(), nil + } + } + return nil, errors.Errorf("%s is not a valid version for the package %s(%s)", s, pi.ProjectRoot, pi.Source) } func checkErrors(m map[string]pkgtree.PackageOrErr) error { diff --git a/cmd/dep/ensure_test.go b/cmd/dep/ensure_test.go index f8fdd40456..478e686e13 100644 --- a/cmd/dep/ensure_test.go +++ b/cmd/dep/ensure_test.go @@ -5,34 +5,72 @@ package main import ( + "reflect" "testing" "github.com/golang/dep/internal/gps" + "github.com/golang/dep/internal/test" ) func TestDeduceConstraint(t *testing.T) { - sv, err := gps.NewSemverConstraint("v1.2.3") + t.Parallel() + h := test.NewHelper(t) + cacheDir := "gps-repocache" + h.TempDir(cacheDir) + sm, err := gps.NewSourceManager(h.Path(cacheDir)) + h.Must(err) + + sv, err := gps.NewSemverConstraintIC("v0.8.1") if err != nil { t.Fatal(err) } constraints := map[string]gps.Constraint{ - "v1.2.3": sv, + "v0.8.1": sv, + "master": gps.NewBranch("master"), "5b3352dc16517996fb951394bcbbe913a2a616e3": gps.Revision("5b3352dc16517996fb951394bcbbe913a2a616e3"), - // valid bzr revs + // valid bzr rev "jess@linux.com-20161116211307-wiuilyamo9ian0m7": gps.Revision("jess@linux.com-20161116211307-wiuilyamo9ian0m7"), + // invalid bzr rev + "go4@golang.org-sadfasdf-": gps.NewVersion("go4@golang.org-sadfasdf-"), + } + + pi := gps.ProjectIdentifier{ProjectRoot: "github.com/sdboyer/deptest"} + for str, want := range constraints { + got, err := deduceConstraint(str, pi, sm) + h.Must(err) + + wantT := reflect.TypeOf(want) + gotT := reflect.TypeOf(got) + if wantT != gotT { + t.Errorf("expected type: %s, got %s, for input %s", wantT, gotT, str) + } + if got.String() != want.String() { + t.Errorf("expected value: %s, got %s for input %s", want, got, str) + } + } +} + +func TestDeduceConstraint_InvalidInput(t *testing.T) { + h := test.NewHelper(t) + + cacheDir := "gps-repocache" + h.TempDir(cacheDir) + sm, err := gps.NewSourceManager(h.Path(cacheDir)) + h.Must(err) + constraints := []string{ // invalid bzr revs - "go4@golang.org-lskjdfnkjsdnf-ksjdfnskjdfn": gps.NewVersion("go4@golang.org-lskjdfnkjsdnf-ksjdfnskjdfn"), - "go4@golang.org-sadfasdf-": gps.NewVersion("go4@golang.org-sadfasdf-"), - "20120425195858-psty8c35ve2oej8t": gps.NewVersion("20120425195858-psty8c35ve2oej8t"), + "go4@golang.org-lskjdfnkjsdnf-ksjdfnskjdfn", + "20120425195858-psty8c35ve2oej8t", } - for str, expected := range constraints { - c := 
deduceConstraint(str) - if c != expected { - t.Fatalf("expected: %#v, got %#v for %s", expected, c, str) + pi := gps.ProjectIdentifier{ProjectRoot: "github.com/sdboyer/deptest"} + for _, str := range constraints { + _, err := deduceConstraint(str, pi, sm) + if err == nil { + t.Errorf("expected %s to produce an error", str) } } } diff --git a/cmd/dep/glide_importer.go b/cmd/dep/glide_importer.go new file mode 100644 index 0000000000..f6c29c9dd9 --- /dev/null +++ b/cmd/dep/glide_importer.go @@ -0,0 +1,231 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bytes" + "io/ioutil" + "log" + "os" + "path" + "path/filepath" + + "github.com/go-yaml/yaml" + "github.com/golang/dep" + fb "github.com/golang/dep/internal/feedback" + "github.com/golang/dep/internal/fs" + "github.com/golang/dep/internal/gps" + "github.com/pkg/errors" +) + +const glideYamlName = "glide.yaml" +const glideLockName = "glide.lock" + +// glideImporter imports glide configuration into the dep configuration format. +type glideImporter struct { + yaml glideYaml + lock *glideLock + + logger *log.Logger + verbose bool + sm gps.SourceManager +} + +func newGlideImporter(logger *log.Logger, verbose bool, sm gps.SourceManager) *glideImporter { + return &glideImporter{ + logger: logger, + verbose: verbose, + sm: sm, + } +} + +type glideYaml struct { + Name string `yaml:"package"` + Ignores []string `yaml:"ignore"` + ExcludeDirs []string `yaml:"excludeDirs"` + Imports []glidePackage `yaml:"import"` + TestImports []glidePackage `yaml:"testImport"` +} + +type glideLock struct { + Imports []glideLockedPackage `yaml:"imports"` + TestImports []glideLockedPackage `yaml:"testImports"` +} + +type glidePackage struct { + Name string `yaml:"package"` + Reference string `yaml:"version"` + Repository string `yaml:"repo"` + + // Unsupported fields that we will warn if used + Subpackages []string `yaml:"subpackages"` + OS string `yaml:"os"` + Arch string `yaml:"arch"` +} + +type glideLockedPackage struct { + Name string `yaml:"name"` + Reference string `yaml:"version"` + Repository string `yaml:"repo"` +} + +func (g *glideImporter) Name() string { + return "glide" +} + +func (g *glideImporter) HasDepMetadata(dir string) bool { + // Only require glide.yaml, the lock is optional + y := filepath.Join(dir, glideYamlName) + if _, err := os.Stat(y); err != nil { + return false + } + + return true +} + +func (g *glideImporter) Import(dir string, pr gps.ProjectRoot) (*dep.Manifest, *dep.Lock, error) { + err := g.load(dir) + if err != nil { + return nil, nil, err + } + + return g.convert(pr) +} + +// load the glide configuration files. 
+func (g *glideImporter) load(projectDir string) error { + g.logger.Println("Detected glide configuration files...") + y := filepath.Join(projectDir, glideYamlName) + if g.verbose { + g.logger.Printf(" Loading %s", y) + } + yb, err := ioutil.ReadFile(y) + if err != nil { + return errors.Wrapf(err, "Unable to read %s", y) + } + err = yaml.Unmarshal(yb, &g.yaml) + if err != nil { + return errors.Wrapf(err, "Unable to parse %s", y) + } + + l := filepath.Join(projectDir, glideLockName) + if exists, _ := fs.IsRegular(l); exists { + if g.verbose { + g.logger.Printf(" Loading %s", l) + } + lb, err := ioutil.ReadFile(l) + if err != nil { + return errors.Wrapf(err, "Unable to read %s", l) + } + lock := &glideLock{} + err = yaml.Unmarshal(lb, lock) + if err != nil { + return errors.Wrapf(err, "Unable to parse %s", l) + } + g.lock = lock + } + + return nil +} + +// convert the glide configuration files into dep configuration files. +func (g *glideImporter) convert(pr gps.ProjectRoot) (*dep.Manifest, *dep.Lock, error) { + projectName := string(pr) + + task := bytes.NewBufferString("Converting from glide.yaml") + if g.lock != nil { + task.WriteString(" and glide.lock") + } + task.WriteString("...") + g.logger.Println(task) + + manifest := &dep.Manifest{ + Constraints: make(gps.ProjectConstraints), + } + + for _, pkg := range g.yaml.Imports { + pc, err := g.buildProjectConstraint(pkg) + if err != nil { + return nil, nil, err + } + manifest.Constraints[pc.Ident.ProjectRoot] = gps.ProjectProperties{Source: pc.Ident.Source, Constraint: pc.Constraint} + } + for _, pkg := range g.yaml.TestImports { + pc, err := g.buildProjectConstraint(pkg) + if err != nil { + return nil, nil, err + } + manifest.Constraints[pc.Ident.ProjectRoot] = gps.ProjectProperties{Source: pc.Ident.Source, Constraint: pc.Constraint} + } + + manifest.Ignored = append(manifest.Ignored, g.yaml.Ignores...) + + if len(g.yaml.ExcludeDirs) > 0 { + if g.yaml.Name != "" && g.yaml.Name != projectName { + g.logger.Printf(" Glide thinks the package is '%s' but dep thinks it is '%s', using dep's value.\n", g.yaml.Name, projectName) + } + + for _, dir := range g.yaml.ExcludeDirs { + pkg := path.Join(projectName, dir) + manifest.Ignored = append(manifest.Ignored, pkg) + } + } + + var lock *dep.Lock + if g.lock != nil { + lock = &dep.Lock{} + + for _, pkg := range g.lock.Imports { + lp := g.buildLockedProject(pkg) + lock.P = append(lock.P, lp) + } + for _, pkg := range g.lock.TestImports { + lp := g.buildLockedProject(pkg) + lock.P = append(lock.P, lp) + } + } + + return manifest, lock, nil +} + +func (g *glideImporter) buildProjectConstraint(pkg glidePackage) (pc gps.ProjectConstraint, err error) { + if pkg.Name == "" { + err = errors.New("Invalid glide configuration, package name is required") + return + } + + if g.verbose { + if pkg.OS != "" { + g.logger.Printf(" The %s package specified an os, but that isn't supported by dep yet, and will be ignored. See https://github.com/golang/dep/issues/291.\n", pkg.Name) + } + if pkg.Arch != "" { + g.logger.Printf(" The %s package specified an arch, but that isn't supported by dep yet, and will be ignored. 
See https://github.com/golang/dep/issues/291.\n", pkg.Name) + } + } + + pc.Ident = gps.ProjectIdentifier{ProjectRoot: gps.ProjectRoot(pkg.Name), Source: pkg.Repository} + pc.Constraint, err = deduceConstraint(pkg.Reference, pc.Ident, g.sm) + + return +} + +func (g *glideImporter) buildLockedProject(pkg glideLockedPackage) gps.LockedProject { + pi := gps.ProjectIdentifier{ + ProjectRoot: gps.ProjectRoot(pkg.Name), + Source: pkg.Repository, + } + revision := gps.Revision(pkg.Reference) + + version, err := lookupVersionForRevision(revision, pi, g.sm) + if err != nil { + // Warn about the problem, it is not enough to warrant failing + warn := errors.Wrapf(err, "Unable to lookup the version represented by %s in %s(%s). Falling back to locking the revision only.", revision, pi.ProjectRoot, pi.Source) + g.logger.Printf(warn.Error()) + version = revision + } + + feedback(version, pi.ProjectRoot, fb.DepTypeImported, g.logger) + return gps.NewLockedProject(pi, version, nil) +} diff --git a/cmd/dep/glide_importer_test.go b/cmd/dep/glide_importer_test.go new file mode 100644 index 0000000000..c0a5987cf8 --- /dev/null +++ b/cmd/dep/glide_importer_test.go @@ -0,0 +1,371 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bytes" + "log" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/golang/dep" + "github.com/golang/dep/internal/gps" + "github.com/golang/dep/internal/test" + "github.com/pkg/errors" +) + +const testGlideProjectRoot = "github.com/golang/notexist" + +func newTestLoggers() *dep.Loggers { + return &dep.Loggers{ + Out: log.New(os.Stdout, "", 0), + Err: log.New(os.Stderr, "", 0), + Verbose: true, + } +} + +func newTestContext(h *test.Helper) *dep.Ctx { + h.TempDir("src") + pwd := h.Path(".") + return &dep.Ctx{ + GOPATH: pwd, + Loggers: newTestLoggers(), + } +} + +func TestGlideConfig_Import(t *testing.T) { + h := test.NewHelper(t) + defer h.Cleanup() + + cacheDir := "gps-repocache" + h.TempDir(cacheDir) + h.TempDir("src") + h.TempDir(filepath.Join("src", testGlideProjectRoot)) + h.TempCopy(filepath.Join(testGlideProjectRoot, glideYamlName), "glide/glide.yaml") + h.TempCopy(filepath.Join(testGlideProjectRoot, glideLockName), "glide/glide.lock") + + projectRoot := h.Path(testGlideProjectRoot) + sm, err := gps.NewSourceManager(h.Path(cacheDir)) + h.Must(err) + defer sm.Release() + + // Capture stderr so we can verify output + verboseOutput := &bytes.Buffer{} + logger := log.New(verboseOutput, "", 0) + + g := newGlideImporter(logger, false, sm) // Disable verbose so that we don't print values that change each test run + if !g.HasDepMetadata(projectRoot) { + t.Fatal("Expected the importer to detect the glide configuration files") + } + + m, l, err := g.Import(projectRoot, testGlideProjectRoot) + h.Must(err) + + if m == nil { + t.Fatal("Expected the manifest to be generated") + } + + if l == nil { + t.Fatal("Expected the lock to be generated") + } + + goldenFile := "glide/expected_import_output.txt" + got := verboseOutput.String() + want := h.GetTestFileString(goldenFile) + if want != got { + if *test.UpdateGolden { + if err := h.WriteTestFile(goldenFile, got); err != nil { + t.Fatalf("%+v", errors.Wrapf(err, "Unable to write updated golden file %s", goldenFile)) + } + } else { + t.Fatalf("expected %s, got %s", want, got) + } + } +} + +func TestGlideConfig_Import_MissingLockFile(t *testing.T) { + + h := test.NewHelper(t) + defer h.Cleanup() + 
+ cacheDir := "gps-repocache" + h.TempDir(cacheDir) + h.TempDir("src") + h.TempDir(filepath.Join("src", "glidetest")) + h.TempCopy(filepath.Join("glidetest", glideYamlName), "glide/glide.yaml") + + projectRoot := h.Path("glidetest") + sm, err := gps.NewSourceManager(h.Path(cacheDir)) + h.Must(err) + defer sm.Release() + + logger := log.New(os.Stderr, "", 0) + g := newGlideImporter(logger, true, sm) + if !g.HasDepMetadata(projectRoot) { + t.Fatal("The glide importer should gracefully handle when only glide.yaml is present") + } + + m, l, err := g.Import(projectRoot, testGlideProjectRoot) + h.Must(err) + + if m == nil { + t.Fatal("Expected the manifest to be generated") + } + + if l != nil { + t.Fatal("Expected the lock to not be generated") + } +} + +func TestGlideConfig_Convert_Project(t *testing.T) { + + h := test.NewHelper(t) + defer h.Cleanup() + + pkg := "github.com/sdboyer/deptest" + repo := "https://github.com/sdboyer/deptest.git" + + ctx := newTestContext(h) + sm, err := ctx.SourceManager() + h.Must(err) + defer sm.Release() + + g := newGlideImporter(ctx.Err, true, sm) + g.yaml = glideYaml{ + Imports: []glidePackage{ + { + Name: pkg, + Repository: repo, + Reference: "1.0.0", + }, + }, + } + g.lock = &glideLock{ + Imports: []glideLockedPackage{ + { + Name: pkg, + Repository: repo, + Reference: "ff2948a2ac8f538c4ecd55962e919d1e13e74baf", + }, + }, + } + + manifest, lock, err := g.convert(testGlideProjectRoot) + if err != nil { + t.Fatal(err) + } + + d, ok := manifest.Constraints[gps.ProjectRoot(pkg)] + if !ok { + t.Fatal("Expected the manifest to have a dependency for 'github.com/sdboyer/deptest' but got none") + } + + wantC := "^1.0.0" + gotC := d.Constraint.String() + if gotC != wantC { + t.Fatalf("Expected manifest constraint to be %s, got %s", wantC, gotC) + } + + gotS := d.Source + if gotS != repo { + t.Fatalf("Expected manifest source to be %s, got %s", repo, gotS) + } + + wantP := 1 + gotP := len(lock.P) + if gotP != 1 { + t.Fatalf("Expected the lock to contain %d project but got %d", wantP, gotP) + } + + p := lock.P[0] + gotPr := string(p.Ident().ProjectRoot) + if gotPr != pkg { + t.Fatalf("Expected the lock to have a project for %s but got '%s'", pkg, gotPr) + } + + gotS = p.Ident().Source + if gotS != repo { + t.Fatalf("Expected locked source to be %s, got '%s'", repo, gotS) + } + + lv := p.Version() + lpv, ok := lv.(gps.PairedVersion) + if !ok { + t.Fatalf("Expected locked version to be a PairedVersion but got %T", lv) + } + + wantRev := "ff2948a2ac8f538c4ecd55962e919d1e13e74baf" + gotRev := lpv.Underlying().String() + if gotRev != wantRev { + t.Fatalf("Expected locked revision to be %s, got %s", wantRev, gotRev) + } + + wantV := "v1.0.0" + gotV := lpv.String() + if gotV != wantV { + t.Fatalf("Expected locked version to be %s, got %s", wantV, gotV) + } +} + +func TestGlideConfig_Convert_TestProject(t *testing.T) { + h := test.NewHelper(t) + defer h.Cleanup() + + pkg := "github.com/sdboyer/deptest" + + ctx := newTestContext(h) + sm, err := ctx.SourceManager() + h.Must(err) + defer sm.Release() + + g := newGlideImporter(ctx.Err, true, sm) + g.yaml = glideYaml{ + TestImports: []glidePackage{ + { + Name: pkg, + Reference: "v1.0.0", + }, + }, + } + g.lock = &glideLock{ + TestImports: []glideLockedPackage{ + { + Name: pkg, + Reference: "ff2948a2ac8f538c4ecd55962e919d1e13e74baf", + }, + }, + } + + manifest, lock, err := g.convert(testGlideProjectRoot) + if err != nil { + t.Fatal(err) + } + + _, ok := manifest.Constraints[gps.ProjectRoot(pkg)] + if !ok { + t.Fatalf("Expected the 
manifest to have a dependency for %s but got none", pkg) + } + + if len(lock.P) != 1 { + t.Fatalf("Expected the lock to contain 1 project but got %d", len(lock.P)) + } + p := lock.P[0] + if p.Ident().ProjectRoot != gps.ProjectRoot(pkg) { + t.Fatalf("Expected the lock to have a project for %s but got '%s'", pkg, p.Ident().ProjectRoot) + } +} + +func TestGlideConfig_Convert_Ignore(t *testing.T) { + pkg := "github.com/sdboyer/deptest" + + logger := log.New(os.Stderr, "", 0) + g := newGlideImporter(logger, true, nil) + g.yaml = glideYaml{ + Ignores: []string{pkg}, + } + + manifest, _, err := g.convert(testGlideProjectRoot) + if err != nil { + t.Fatal(err) + } + + if len(manifest.Ignored) != 1 { + t.Fatalf("Expected the manifest to contain 1 ignored project but got %d", len(manifest.Ignored)) + } + i := manifest.Ignored[0] + if i != pkg { + t.Fatalf("Expected the manifest to ignore %s but got '%s'", pkg, i) + } +} + +func TestGlideConfig_Convert_ExcludeDir(t *testing.T) { + logger := log.New(os.Stderr, "", 0) + g := newGlideImporter(logger, true, nil) + g.yaml = glideYaml{ + ExcludeDirs: []string{"samples"}, + } + + manifest, _, err := g.convert(testGlideProjectRoot) + if err != nil { + t.Fatal(err) + } + + if len(manifest.Ignored) != 1 { + t.Fatalf("Expected the manifest to contain 1 ignored project but got %d", len(manifest.Ignored)) + } + i := manifest.Ignored[0] + if i != "github.com/golang/notexist/samples" { + t.Fatalf("Expected the manifest to ignore 'github.com/golang/notexist/samples' but got '%s'", i) + } +} + +func TestGlideConfig_Convert_ExcludeDir_IgnoresMismatchedPackageName(t *testing.T) { + logger := log.New(os.Stderr, "", 0) + g := newGlideImporter(logger, true, nil) + g.yaml = glideYaml{ + Name: "github.com/golang/mismatched-package-name", + ExcludeDirs: []string{"samples"}, + } + + manifest, _, err := g.convert(testGlideProjectRoot) + if err != nil { + t.Fatal(err) + } + + if len(manifest.Ignored) != 1 { + t.Fatalf("Expected the manifest to contain 1 ignored project but got %d", len(manifest.Ignored)) + } + i := manifest.Ignored[0] + if i != "github.com/golang/notexist/samples" { + t.Fatalf("Expected the manifest to ignore 'github.com/golang/notexist/samples' but got '%s'", i) + } +} + +func TestGlideConfig_Convert_WarnsForUnusedFields(t *testing.T) { + testCases := map[string]glidePackage{ + "specified an os": {OS: "windows"}, + "specified an arch": {Arch: "i686"}, + } + + for wantWarning, pkg := range testCases { + t.Run(wantWarning, func(t *testing.T) { + pkg.Name = "github.com/sdboyer/deptest" + pkg.Reference = "v1.0.0" + + // Capture stderr so we can verify warnings + verboseOutput := &bytes.Buffer{} + logger := log.New(verboseOutput, "", 0) + g := newGlideImporter(logger, true, nil) + g.yaml = glideYaml{ + Imports: []glidePackage{pkg}, + } + + _, _, err := g.convert(testGlideProjectRoot) + if err != nil { + t.Fatal(err) + } + + warnings := verboseOutput.String() + if !strings.Contains(warnings, wantWarning) { + t.Errorf("Expected the output to include the warning '%s'", wantWarning) + } + }) + } +} + +func TestGlideConfig_Convert_BadInput_EmptyPackageName(t *testing.T) { + logger := log.New(os.Stderr, "", 0) + g := newGlideImporter(logger, true, nil) + g.yaml = glideYaml{ + Imports: []glidePackage{{Name: ""}}, + } + + _, _, err := g.convert(testGlideProjectRoot) + if err == nil { + t.Fatal("Expected conversion to fail because the package name is empty") + } +} diff --git a/cmd/dep/gopath_scanner.go b/cmd/dep/gopath_scanner.go new file mode 100644 index 
0000000000..a5824be924 --- /dev/null +++ b/cmd/dep/gopath_scanner.go @@ -0,0 +1,392 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "os" + "path/filepath" + "strings" + "sync" + + "github.com/golang/dep" + fb "github.com/golang/dep/internal/feedback" + "github.com/golang/dep/internal/fs" + "github.com/golang/dep/internal/gps" + "github.com/golang/dep/internal/gps/paths" + "github.com/golang/dep/internal/gps/pkgtree" + "github.com/pkg/errors" +) + +// gopathScanner supplies manifest/lock data by scanning the contents of GOPATH. +// It uses its results to fill in any missing details left by the rootAnalyzer. +type gopathScanner struct { + ctx *dep.Ctx + directDeps map[string]bool + sm gps.SourceManager + + pd projectData + origM *dep.Manifest + origL *dep.Lock +} + +func newGopathScanner(ctx *dep.Ctx, directDeps map[string]bool, sm gps.SourceManager) *gopathScanner { + return &gopathScanner{ + ctx: ctx, + directDeps: directDeps, + sm: sm, + } +} + +// InitializeRootManifestAndLock performs analysis of the filesystem tree rooted +// at path, with the root import path importRoot, to determine the project's +// constraints. Respect any initial constraints defined in the root manifest and +// lock. +func (g *gopathScanner) InitializeRootManifestAndLock(rootM *dep.Manifest, rootL *dep.Lock) error { + var err error + + g.ctx.Loggers.Err.Println("Searching GOPATH for projects...") + g.pd, err = g.scanGopathForDependencies() + if err != nil { + return err + } + + g.origM = &dep.Manifest{ + Constraints: g.pd.constraints, + Ovr: make(gps.ProjectConstraints), + } + g.origL = &dep.Lock{ + P: make([]gps.LockedProject, 0, len(g.pd.ondisk)), + } + + for pr, v := range g.pd.ondisk { + // That we have to chop off these path prefixes is a symptom of + // a problem in gps itself + pkgs := make([]string, 0, len(g.pd.dependencies[pr])) + prslash := string(pr) + "/" + for _, pkg := range g.pd.dependencies[pr] { + if pkg == string(pr) { + pkgs = append(pkgs, ".") + } else { + pkgs = append(pkgs, trimPathPrefix(pkg, prslash)) + } + } + + g.origL.P = append(g.origL.P, gps.NewLockedProject( + gps.ProjectIdentifier{ProjectRoot: pr}, v, pkgs), + ) + } + + g.overlay(rootM, rootL) + + return nil +} + +// Fill in gaps in the root manifest/lock with data found from the GOPATH.
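// Entries already present in the root manifest or lock take precedence: the
// GOPATH-derived origM/origL values only fill in projects that are absent
// (note the `continue` guards below).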
+func (g *gopathScanner) overlay(rootM *dep.Manifest, rootL *dep.Lock) { + for pkg, prj := range g.origM.Constraints { + if _, has := rootM.Constraints[pkg]; has { + continue + } + rootM.Constraints[pkg] = prj + v := g.pd.ondisk[pkg] + feedback(v, pkg, fb.DepTypeDirect, g.ctx.Err) + } + + // Keep track of which projects have been locked + lockedProjects := map[gps.ProjectRoot]bool{} + for _, lp := range rootL.P { + lockedProjects[lp.Ident().ProjectRoot] = true + } + + for _, lp := range g.origL.P { + pkg := lp.Ident().ProjectRoot + if _, isLocked := lockedProjects[pkg]; isLocked { + continue + } + rootL.P = append(rootL.P, lp) + lockedProjects[pkg] = true + + if _, isDirect := g.directDeps[string(pkg)]; !isDirect { + v := g.pd.ondisk[pkg] + feedback(v, pkg, fb.DepTypeTransitive, g.ctx.Err) + } + } + + // Identify projects whose version is unknown and will have to be solved for + var unlockedProjects []string + for pr := range g.pd.notondisk { + if _, isLocked := lockedProjects[pr]; isLocked { + continue + } + unlockedProjects = append(unlockedProjects, string(pr)) + } + if len(unlockedProjects) > 0 { + g.ctx.Loggers.Err.Printf("The following dependencies were not found in GOPATH. "+ "Dep will use the most recent versions of these projects.\n %s", strings.Join(unlockedProjects, "\n ")) + } +} + +func (g *gopathScanner) FinalizeRootManifestAndLock(m *dep.Manifest, l *dep.Lock) { + // Iterate through the new projects in the solved lock and add them to the + // manifest if they are direct deps; log feedback for all the new projects. + for _, x := range l.Projects() { + pr := x.Ident().ProjectRoot + newProject := true + // Check if it's a new project, not in the old lock + for _, y := range g.origL.Projects() { + if pr == y.Ident().ProjectRoot { + newProject = false + } + } + if newProject { + // If it's in notondisk, add it to the manifest; these are direct dependencies. + if _, ok := g.pd.notondisk[pr]; ok { + m.Constraints[pr] = getProjectPropertiesFromVersion(x.Version()) + } + } + } +} + +func trimPathPrefix(p1, p2 string) string { + if fs.HasFilepathPrefix(p1, p2) { + return p1[len(p2):] + } + return p1 +} + +// contains checks if an array of strings contains a value +func contains(a []string, b string) bool { + for _, v := range a { + if b == v { + return true + } + } + return false +} + +// getProjectPropertiesFromVersion takes a gps.Version and returns a proper +// gps.ProjectProperties with Constraint value based on the provided version.
+func getProjectPropertiesFromVersion(v gps.Version) gps.ProjectProperties { + pp := gps.ProjectProperties{} + + // extract version and ignore if it's revision only + switch tv := v.(type) { + case gps.PairedVersion: + v = tv.Unpair() + case gps.Revision: + return pp + } + + switch v.Type() { + case gps.IsBranch, gps.IsVersion: + pp.Constraint = v + case gps.IsSemver: + c, err := gps.NewSemverConstraintIC(v.String()) + if err != nil { + panic(err) + } + pp.Constraint = c + } + + return pp +} + +type projectData struct { + constraints gps.ProjectConstraints // constraints that could be found + dependencies map[gps.ProjectRoot][]string // all dependencies (imports) found by project root + notondisk map[gps.ProjectRoot]bool // projects that were not found on disk + ondisk map[gps.ProjectRoot]gps.Version // projects that were found on disk +} + +func (g *gopathScanner) scanGopathForDependencies() (projectData, error) { + constraints := make(gps.ProjectConstraints) + dependencies := make(map[gps.ProjectRoot][]string) + packages := make(map[string]bool) + notondisk := make(map[gps.ProjectRoot]bool) + ondisk := make(map[gps.ProjectRoot]gps.Version) + + var syncDepGroup sync.WaitGroup + syncDep := func(pr gps.ProjectRoot, sm gps.SourceManager) { + if err := sm.SyncSourceFor(gps.ProjectIdentifier{ProjectRoot: pr}); err != nil { + g.ctx.Loggers.Err.Printf("%+v", errors.Wrapf(err, "Unable to cache %s", pr)) + } + syncDepGroup.Done() + } + + if len(g.directDeps) == 0 { + return projectData{}, nil + } + + for ip := range g.directDeps { + pr, err := g.sm.DeduceProjectRoot(ip) + if err != nil { + return projectData{}, errors.Wrap(err, "sm.DeduceProjectRoot") + } + + packages[ip] = true + if _, has := dependencies[pr]; has { + dependencies[pr] = append(dependencies[pr], ip) + continue + } + syncDepGroup.Add(1) + go syncDep(pr, g.sm) + + dependencies[pr] = []string{ip} + v, err := g.ctx.VersionInWorkspace(pr) + if err != nil { + notondisk[pr] = true + continue + } + + ondisk[pr] = v + pp := getProjectPropertiesFromVersion(v) + if pp.Constraint != nil || pp.Source != "" { + constraints[pr] = pp + } + } + + // Explore the packages we've found for transitive deps, either + // completing the lock or identifying (more) missing projects that we'll + // need to ask gps to solve for us. + colors := make(map[string]uint8) + const ( + white uint8 = iota + grey + black + ) + + // cache of PackageTrees, so we don't parse projects more than once + ptrees := make(map[gps.ProjectRoot]pkgtree.PackageTree) + + // depth-first traverser + var dft func(string) error + dft = func(pkg string) error { + switch colors[pkg] { + case white: + colors[pkg] = grey + + pr, err := g.sm.DeduceProjectRoot(pkg) + if err != nil { + return errors.Wrap(err, "could not deduce project root for "+pkg) + } + + // We already visited this project root earlier via some other + // pkg within it, and made the decision that it's not on disk. + // Respect that decision, and pop the stack. + if notondisk[pr] { + colors[pkg] = black + return nil + } + + ptree, has := ptrees[pr] + if !has { + // It's fine if the root does not exist - it indicates that this + // project is not present in the workspace, and so we need to + // solve to deal with this dep. 
+				r := filepath.Join(g.ctx.GOPATH, "src", string(pr))
+				fi, err := os.Stat(r)
+				if err != nil || !fi.IsDir() {
+					colors[pkg] = black
+					notondisk[pr] = true
+					return nil
+				}
+
+				// We know the project is on disk; the question is whether we're
+				// first seeing it here, in the transitive exploration, or if it
+				// was found in the initial pass on direct imports. We know it's
+				// the former if there's no entry for it in the ondisk map.
+				if _, in := ondisk[pr]; !in {
+					v, err := g.ctx.VersionInWorkspace(pr)
+					if err != nil {
+						// Even if we know it's on disk, errors are still
+						// possible when trying to deduce version. If we
+						// encounter such an error, just treat the project as
+						// not being on disk; the solver will work it out.
+						colors[pkg] = black
+						notondisk[pr] = true
+						return nil
+					}
+					ondisk[pr] = v
+				}
+
+				ptree, err = pkgtree.ListPackages(r, string(pr))
+				if err != nil {
+					// Any error here other than a nonexistent dir (which
+					// can't happen because we covered that case above) is
+					// probably critical, so bail out.
+					return errors.Wrap(err, "gps.ListPackages")
+				}
+				ptrees[pr] = ptree
+			}
+
+			// Get a reachmap that includes main pkgs (even though importing
+			// them is an error, what we're checking right now is simply whether
+			// there's a package with go code present on disk), and does not
+			// backpropagate errors (again, because our only concern right now
+			// is package existence).
+			rm, errmap := ptree.ToReachMap(true, false, false, nil)
+			reached, ok := rm[pkg]
+			if !ok {
+				colors[pkg] = black
+				// not on disk...
+				notondisk[pr] = true
+				return nil
+			}
+			if _, ok := errmap[pkg]; ok {
+				// The package is on disk, but contains some errors.
+				colors[pkg] = black
+				return nil
+			}
+
+			if deps, has := dependencies[pr]; has {
+				if !contains(deps, pkg) {
+					dependencies[pr] = append(deps, pkg)
+				}
+			} else {
+				dependencies[pr] = []string{pkg}
+				syncDepGroup.Add(1)
+				go syncDep(pr, g.sm)
+			}
+
+			// recurse
+			for _, rpkg := range reached.External {
+				if paths.IsStandardImportPath(rpkg) {
+					continue
+				}
+
+				err := dft(rpkg)
+				if err != nil {
+					// Bubble up any errors we encounter
+					return err
+				}
+			}
+
+			colors[pkg] = black
+		case grey:
+			return errors.Errorf("Import cycle detected on %s", pkg)
+		}
+		return nil
+	}
+
+	// run the depth-first traversal from the set of immediate external
+	// package imports we found in the current project
+	for pkg := range packages {
+		err := dft(pkg)
+		if err != nil {
+			return projectData{}, err // already errors.Wrap()'d internally
+		}
+	}
+
+	syncDepGroup.Wait()
+
+	pd := projectData{
+		constraints:  constraints,
+		dependencies: dependencies,
+		notondisk:    notondisk,
+		ondisk:       ondisk,
+	}
+	return pd, nil
+}
diff --git a/cmd/dep/gopath_scanner_test.go b/cmd/dep/gopath_scanner_test.go
new file mode 100644
index 0000000000..9b7679dfa0
--- /dev/null
+++ b/cmd/dep/gopath_scanner_test.go
@@ -0,0 +1,177 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"reflect"
+	"testing"
+
+	"github.com/golang/dep"
+	"github.com/golang/dep/internal/gps"
+	"github.com/golang/dep/internal/test"
+)
+
+const testProject1 string = "github.com/sdboyer/deptest"
+const testProject2 string = "github.com/sdboyer/deptestdos"
+
+func TestGopathScanner_OverlayManifestConstraints(t *testing.T) {
+	t.Parallel()
+
+	h := test.NewHelper(t)
+	ctx := newTestContext(h)
+
+	pi1 := gps.ProjectIdentifier{ProjectRoot: gps.ProjectRoot(testProject1)}
+	pi2 := gps.ProjectIdentifier{ProjectRoot: gps.ProjectRoot(testProject2)}
+	v1 := gps.NewVersion("v1.0.0")
+	v2 := gps.NewVersion("v2.0.0")
+	v3 := gps.NewVersion("v3.0.0")
+	rootM := &dep.Manifest{
+		Constraints: gps.ProjectConstraints{
+			pi1.ProjectRoot: gps.ProjectProperties{Constraint: v1},
+		},
+	}
+	rootL := &dep.Lock{}
+	gs := gopathScanner{
+		origM: &dep.Manifest{
+			Constraints: gps.ProjectConstraints{
+				pi1.ProjectRoot: gps.ProjectProperties{Constraint: v2},
+				pi2.ProjectRoot: gps.ProjectProperties{Constraint: v3},
+			},
+		},
+		origL: &dep.Lock{},
+		ctx:   ctx,
+		pd: projectData{
+			ondisk: map[gps.ProjectRoot]gps.Version{
+				pi1.ProjectRoot: v2,
+				pi2.ProjectRoot: v3,
+			},
+		},
+	}
+
+	gs.overlay(rootM, rootL)
+
+	dep, has := rootM.Constraints[pi1.ProjectRoot]
+	if !has {
+		t.Fatalf("Expected the root manifest to contain %s", pi1.ProjectRoot)
+	}
+	wantC := v1.String()
+	gotC := dep.Constraint.String()
+	if wantC != gotC {
+		t.Fatalf("Expected %s to be constrained to '%s', got '%s'", pi1.ProjectRoot, wantC, gotC)
+	}
+
+	dep, has = rootM.Constraints[pi2.ProjectRoot]
+	if !has {
+		t.Fatalf("Expected the root manifest to contain %s", pi2.ProjectRoot)
+	}
+	wantC = v3.String()
+	gotC = dep.Constraint.String()
+	if wantC != gotC {
+		t.Fatalf("Expected %s to be constrained to '%s', got '%s'", pi2.ProjectRoot, wantC, gotC)
+	}
+}
+
+func TestGopathScanner_OverlayLockProjects(t *testing.T) {
+	t.Parallel()
+
+	h := test.NewHelper(t)
+	ctx := newTestContext(h)
+
+	rootM := &dep.Manifest{}
+	pi1 := gps.ProjectIdentifier{ProjectRoot: gps.ProjectRoot(testProject1)}
+	pi2 := gps.ProjectIdentifier{ProjectRoot: gps.ProjectRoot(testProject2)}
+	v1 := gps.NewVersion("v1.0.0")
+	v2 := gps.NewVersion("v2.0.0")
+	v3 := gps.NewVersion("v3.0.0")
+	rootL := &dep.Lock{
+		P: []gps.LockedProject{gps.NewLockedProject(pi1, v1, []string{})},
+	}
+	gs := gopathScanner{
+		origM: &dep.Manifest{Constraints: make(gps.ProjectConstraints)},
+		origL: &dep.Lock{
+			P: []gps.LockedProject{
+				gps.NewLockedProject(pi1, v2, []string{}), // ignored, already exists in lock
+				gps.NewLockedProject(pi2, v3, []string{}), // should be added to the lock
+			},
+		},
+		ctx: ctx,
+		pd: projectData{
+			ondisk: map[gps.ProjectRoot]gps.Version{
+				pi1.ProjectRoot: v2,
+				pi2.ProjectRoot: v3,
+			},
+		},
+	}
+
+	gs.overlay(rootM, rootL)
+
+	if len(rootL.P) != 2 {
+		t.Fatalf("Expected the root lock to contain 2 projects, got %d", len(rootL.P))
+	}
+
+	if rootL.P[0].Version() != v1 {
+		t.Fatalf("Expected %s to be locked to '%s', got '%s'", rootL.P[0].Ident().ProjectRoot, v1, rootL.P[0].Version())
+	}
+
+	if rootL.P[1].Version() != v3 {
+		t.Fatalf("Expected %s to be locked to '%s', got '%s'", rootL.P[1].Ident().ProjectRoot, v3, rootL.P[1].Version())
+	}
+}
+
+func TestContains(t *testing.T) {
+	t.Parallel()
+	a := []string{"a", "b", "abcd"}
+
+	if !contains(a, "a") {
+		t.Fatal("expected array to contain 'a'")
+	}
+	if contains(a, "d") {
+		t.Fatal("expected array to not contain 'd'")
+	}
+}
+
+func TestGetProjectPropertiesFromVersion(t *testing.T) {
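+	// Table-driven cases: each input gps.Version should map to the expected
+	// constraint (branches and plain versions pass through, semver tags gain a
+	// caret range, and bare revisions yield no constraint).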
t.Parallel() + wantSemver, _ := gps.NewSemverConstraintIC("v1.0.0") + cases := []struct { + version, want gps.Constraint + }{ + { + version: gps.NewBranch("foo-branch"), + want: gps.NewBranch("foo-branch"), + }, + { + version: gps.NewVersion("foo-version"), + want: gps.NewVersion("foo-version"), + }, + { + version: gps.NewVersion("v1.0.0"), + want: wantSemver, + }, + { + version: gps.NewBranch("foo-branch").Is("some-revision"), + want: gps.NewBranch("foo-branch"), + }, + { + version: gps.NewVersion("foo-version").Is("some-revision"), + want: gps.NewVersion("foo-version"), + }, + { + version: gps.Revision("some-revision"), + want: nil, + }, + { + version: gps.NewVersion("v1.0.0").Is("some-revision"), + want: wantSemver, + }, + } + + for _, c := range cases { + actualProp := getProjectPropertiesFromVersion(c.version.(gps.Version)) + if !reflect.DeepEqual(c.want, actualProp.Constraint) { + t.Fatalf("Constraints are not as expected: \n\t(GOT) %v\n\t(WNT) %v", actualProp.Constraint, c.want) + } + } +} diff --git a/cmd/dep/graphviz_test.go b/cmd/dep/graphviz_test.go index 37a2e03342..a7537f0074 100644 --- a/cmd/dep/graphviz_test.go +++ b/cmd/dep/graphviz_test.go @@ -11,10 +11,12 @@ import ( ) func TestEmptyProject(t *testing.T) { - g := new(graphviz).New() h := test.NewHelper(t) + h.Parallel() defer h.Cleanup() + g := new(graphviz).New() + b := g.output() want := h.GetTestFileString("graphviz/empty.dot") @@ -24,10 +26,12 @@ func TestEmptyProject(t *testing.T) { } func TestSimpleProject(t *testing.T) { - g := new(graphviz).New() h := test.NewHelper(t) + h.Parallel() defer h.Cleanup() + g := new(graphviz).New() + g.createNode("project", "", []string{"foo", "bar"}) g.createNode("foo", "master", []string{"bar"}) g.createNode("bar", "dev", []string{}) @@ -40,10 +44,12 @@ func TestSimpleProject(t *testing.T) { } func TestNoLinks(t *testing.T) { - g := new(graphviz).New() h := test.NewHelper(t) + h.Parallel() defer h.Cleanup() + g := new(graphviz).New() + g.createNode("project", "", []string{}) b := g.output() @@ -54,6 +60,8 @@ func TestNoLinks(t *testing.T) { } func TestIsPathPrefix(t *testing.T) { + t.Parallel() + tcs := []struct { path string pre string diff --git a/cmd/dep/hash_in.go b/cmd/dep/hash_in.go index 4b76a16ccd..d2af94d969 100644 --- a/cmd/dep/hash_in.go +++ b/cmd/dep/hash_in.go @@ -24,7 +24,7 @@ func (cmd *hashinCommand) Register(fs *flag.FlagSet) {} type hashinCommand struct{} func (hashinCommand) Run(ctx *dep.Ctx, args []string) error { - p, err := ctx.LoadProject("") + p, err := ctx.LoadProject() if err != nil { return err } diff --git a/cmd/dep/init.go b/cmd/dep/init.go index efc0863ee8..6f0f83221f 100644 --- a/cmd/dep/init.go +++ b/cmd/dep/init.go @@ -5,18 +5,16 @@ package main import ( - "encoding/hex" "flag" "os" "path/filepath" "strings" - "sync" "time" "github.com/golang/dep" - "github.com/golang/dep/internal" - fb "github.com/golang/dep/internal/feedback" + "github.com/golang/dep/internal/fs" "github.com/golang/dep/internal/gps" + "github.com/golang/dep/internal/gps/paths" "github.com/golang/dep/internal/gps/pkgtree" "github.com/pkg/errors" ) @@ -27,6 +25,12 @@ Initialize the project at filepath root by parsing its dependencies, writing manifest and lock files, and vendoring the dependencies. If root isn't specified, use the current directory. +When configuration for another dependency management tool is detected, it is +imported into the initial manifest and lock. Use the -skip-tools flag to +disable this behavior. The following external tools are supported: glide. 
+Any dependencies that are not constrained by external configuration use the
+GOPATH analysis below.
+
The version of each dependency will reflect the current state of the GOPATH. If
a dependency doesn't exist in the GOPATH, a version will be selected from the
versions available from the upstream source per the following algorithm:
@@ -48,17 +52,12 @@ func (cmd *initCommand) Hidden() bool      { return false }

func (cmd *initCommand) Register(fs *flag.FlagSet) {
	fs.BoolVar(&cmd.noExamples, "no-examples", false, "don't include example in Gopkg.toml")
+	fs.BoolVar(&cmd.skipTools, "skip-tools", false, "skip importing configuration from other dependency managers")
}

type initCommand struct {
	noExamples bool
-}
-
-func trimPathPrefix(p1, p2 string) string {
-	if internal.HasFilepathPrefix(p1, p2) {
-		return p1[len(p2):]
-	}
-	return p1
+	skipTools  bool
}

func (cmd *initCommand) Run(ctx *dep.Ctx, args []string) error {
@@ -71,13 +70,19 @@ func (cmd *initCommand) Run(ctx *dep.Ctx, args []string) error {
		root = ctx.WorkingDir
	} else {
		root = args[0]
+		if !filepath.IsAbs(args[0]) {
+			root = filepath.Join(ctx.WorkingDir, args[0])
+		}
+		if err := os.MkdirAll(root, os.FileMode(0777)); err != nil {
+			return errors.Errorf("unable to create directory %s: %v", root, err)
+		}
	}

	mf := filepath.Join(root, dep.ManifestName)
	lf := filepath.Join(root, dep.LockName)
	vpath := filepath.Join(root, "vendor")

-	mok, err := dep.IsRegular(mf)
+	mok, err := fs.IsRegular(mf)
	if err != nil {
		return err
	}
@@ -86,7 +91,7 @@ func (cmd *initCommand) Run(ctx *dep.Ctx, args []string) error {
	}
	// Manifest file does not exist.

-	lok, err := dep.IsRegular(lf)
+	lok, err := fs.IsRegular(lf)
	if err != nil {
		return err
	}
@@ -98,15 +103,9 @@ func (cmd *initCommand) Run(ctx *dep.Ctx, args []string) error {
	if err != nil {
		return errors.Wrap(err, "determineProjectRoot")
	}
-	if ctx.Loggers.Verbose {
-		ctx.Loggers.Err.Printf("dep: Finding dependencies for %q...\n", cpr)
-	}
-	pkgT, err := pkgtree.ListPackages(root, cpr)
+	pkgT, directDeps, err := getDirectDependencies(root, cpr)
	if err != nil {
-		return errors.Wrap(err, "gps.ListPackages")
-	}
-	if ctx.Loggers.Verbose {
-		ctx.Loggers.Err.Printf("dep: Found %d dependencies.\n", len(pkgT.Packages))
+		return err
	}
	sm, err := ctx.SourceManager()
	if err != nil {
@@ -115,53 +114,26 @@ func (cmd *initCommand) Run(ctx *dep.Ctx, args []string) error {
	sm.UseDefaultSignalHandling()
	defer sm.Release()

-	ctx.Loggers.Err.Println("Searching GOPATH for projects...")
-	pd, err := getProjectData(ctx, pkgT, cpr, sm)
+	// Initialize with imported data, then fill in the gaps using the GOPATH
+	rootAnalyzer := newRootAnalyzer(cmd.skipTools, ctx, directDeps, sm)
+	m, l, err := rootAnalyzer.InitializeRootManifestAndLock(root, gps.ProjectRoot(cpr))
	if err != nil {
		return err
	}

-	m := &dep.Manifest{
-		Dependencies: pd.constraints,
-	}
-
-	// Make an initial lock from what knowledge we've collected about the
-	// versions on disk
-	l := &dep.Lock{
-		P: make([]gps.LockedProject, 0, len(pd.ondisk)),
-	}
-
-	for pr, v := range pd.ondisk {
-		// That we have to chop off these path prefixes is a symptom of
-		// a problem in gps itself
-		pkgs := make([]string, 0, len(pd.dependencies[pr]))
-		prslash := string(pr) + "/"
-		for _, pkg := range pd.dependencies[pr] {
-			if pkg == string(pr) {
-				pkgs = append(pkgs, ".")
-			} else {
-				pkgs = append(pkgs, trimPathPrefix(pkg, prslash))
-			}
-		}
-
-		l.P = append(l.P, gps.NewLockedProject(
-			gps.ProjectIdentifier{ProjectRoot: pr}, v, pkgs),
-		)
+	gs := newGopathScanner(ctx,
directDeps, sm) + err = gs.InitializeRootManifestAndLock(m, l) + if err != nil { + return err } - ctx.Loggers.Err.Println("Using network for remaining projects...") - // Copy lock before solving. Use this to separate new lock projects from soln - copyLock := *l + rootAnalyzer.skipTools = true // Don't import external config during solve for now - // Run solver with project versions found on disk - if ctx.Loggers.Verbose { - ctx.Loggers.Err.Println("dep: Solving...") - } params := gps.SolveParameters{ RootDir: root, RootPackageTree: pkgT, Manifest: m, Lock: l, - ProjectAnalyzer: dep.Analyzer{}, + ProjectAnalyzer: rootAnalyzer, } if ctx.Loggers.Verbose { @@ -178,31 +150,10 @@ func (cmd *initCommand) Run(ctx *dep.Ctx, args []string) error { handleAllTheFailuresOfTheWorld(err) return err } - l = dep.LockFromInterface(soln) - - // Iterate through the new projects in solved lock and add them to manifest - // if direct deps and log feedback for all the new projects. - for _, x := range l.Projects() { - pr := x.Ident().ProjectRoot - newProject := true - // Check if it's a new project, not in the old lock - for _, y := range copyLock.Projects() { - if pr == y.Ident().ProjectRoot { - newProject = false - } - } - if newProject { - // Check if it's in notondisk project map. These are direct deps, should - // be added to manifest. - if _, ok := pd.notondisk[pr]; ok { - m.Dependencies[pr] = getProjectPropertiesFromVersion(x.Version()) - feedback(x.Version(), pr, fb.DepTypeDirect, ctx) - } else { - // Log feedback of transitive project - feedback(x.Version(), pr, fb.DepTypeTransitive, ctx) - } - } - } + l = dep.LockFromSolution(soln) + + rootAnalyzer.FinalizeRootManifestAndLock(m, l) + gs.FinalizeRootManifestAndLock(m, l) // Run gps.Prepare with appropriate constraint solutions from solve run // to generate the final lock memo. @@ -211,7 +162,7 @@ func (cmd *initCommand) Run(ctx *dep.Ctx, args []string) error { return errors.Wrap(err, "prepare solver") } - l.Memo = s.HashInputs() + l.SolveMeta.InputsDigest = s.HashInputs() // Pass timestamp (yyyyMMddHHmmss format) as suffix to backup name. vendorbak, err := dep.BackupVendor(vpath, time.Now().Format("20060102150405")) @@ -222,44 +173,31 @@ func (cmd *initCommand) Run(ctx *dep.Ctx, args []string) error { ctx.Loggers.Err.Printf("Old vendor backed up to %v", vendorbak) } - if ctx.Loggers.Verbose { - ctx.Loggers.Err.Println("dep: Writing manifest and lock files.") - } - sw, err := dep.NewSafeWriter(m, nil, l, dep.VendorAlways) if err != nil { return err } - if err := sw.Write(root, sm, cmd.noExamples); err != nil { + if err := sw.Write(root, sm, !cmd.noExamples); err != nil { return errors.Wrap(err, "safe write of manifest and lock") } return nil } -// contains checks if a array of strings contains a value -func contains(a []string, b string) bool { - for _, v := range a { - if b == v { - return true - } +func getDirectDependencies(root, cpr string) (pkgtree.PackageTree, map[string]bool, error) { + pkgT, err := pkgtree.ListPackages(root, cpr) + if err != nil { + return pkgtree.PackageTree{}, nil, errors.Wrap(err, "gps.ListPackages") } - return false -} -// isStdLib reports whether $GOROOT/src/path should be considered -// part of the standard distribution. For historical reasons we allow people to add -// their own code to $GOROOT instead of using $GOPATH, but we assume that -// code will start with a domain name (dot in the first element). -// This was loving taken from src/cmd/go/pkg.go in Go's code (isStandardImportPath). 
-func isStdLib(path string) bool { - i := strings.Index(path, "/") - if i < 0 { - i = len(path) + directDeps := map[string]bool{} + rm, _ := pkgT.ToReachMap(true, true, false, nil) + for _, pr := range rm.FlattenFn(paths.IsStandardImportPath) { + directDeps[pr] = true } - elem := path[:i] - return !strings.Contains(elem, ".") + + return pkgT, directDeps, nil } // TODO solve failures can be really creative - we need to be similarly creative @@ -273,291 +211,3 @@ func hasImportPathPrefix(s, prefix string) bool { } return strings.HasPrefix(s, prefix+"/") } - -// feedback logs project constraint as feedback to the user. -func feedback(v gps.Version, pr gps.ProjectRoot, depType string, ctx *dep.Ctx) { - rev, version, branch := gps.VersionComponentStrings(v) - - // Check if it's a valid SHA1 digest and trim to 7 characters. - if len(rev) == 40 { - if _, err := hex.DecodeString(rev); err == nil { - // Valid SHA1 digest - rev = rev[0:7] - } - } - - // Get LockedVersion - var ver string - if version != "" { - ver = version - } else if branch != "" { - ver = branch - } - - cf := &fb.ConstraintFeedback{ - LockedVersion: ver, - Revision: rev, - ProjectPath: string(pr), - DependencyType: depType, - } - - // Get non-revision constraint if available - if c := getProjectPropertiesFromVersion(v).Constraint; c != nil { - cf.Version = c.String() - } - - // Attach ConstraintType for direct dep based on locked version - if cf.DependencyType == fb.DepTypeDirect { - if cf.LockedVersion != "" { - cf.ConstraintType = fb.ConsTypeConstraint - } else { - cf.ConstraintType = fb.ConsTypeHint - } - } - - cf.LogFeedback(ctx) -} - -// getProjectPropertiesFromVersion takes a gps.Version and returns a proper -// gps.ProjectProperties with Constraint value based on the provided version. -func getProjectPropertiesFromVersion(v gps.Version) gps.ProjectProperties { - pp := gps.ProjectProperties{} - - // extract version and ignore if it's revision only - switch tv := v.(type) { - case gps.PairedVersion: - v = tv.Unpair() - case gps.Revision: - return pp - } - - switch v.Type() { - case gps.IsBranch, gps.IsVersion: - pp.Constraint = v - case gps.IsSemver: - // TODO: remove "^" when https://github.com/golang/dep/issues/225 is ready. 
- c, err := gps.NewSemverConstraint("^" + v.String()) - if err != nil { - panic(err) - } - pp.Constraint = c - } - - return pp -} - -type projectData struct { - constraints gps.ProjectConstraints // constraints that could be found - dependencies map[gps.ProjectRoot][]string // all dependencies (imports) found by project root - notondisk map[gps.ProjectRoot]bool // projects that were not found on disk - ondisk map[gps.ProjectRoot]gps.Version // projects that were found on disk -} - -func getProjectData(ctx *dep.Ctx, pkgT pkgtree.PackageTree, cpr string, sm gps.SourceManager) (projectData, error) { - constraints := make(gps.ProjectConstraints) - dependencies := make(map[gps.ProjectRoot][]string) - packages := make(map[string]bool) - notondisk := make(map[gps.ProjectRoot]bool) - ondisk := make(map[gps.ProjectRoot]gps.Version) - - var syncDepGroup sync.WaitGroup - syncDep := func(pr gps.ProjectRoot, sm gps.SourceManager) { - if err := sm.SyncSourceFor(gps.ProjectIdentifier{ProjectRoot: pr}); err != nil { - ctx.Loggers.Err.Printf("Unable to cache %s", pr) - } - syncDepGroup.Done() - } - - rm, _ := pkgT.ToReachMap(true, true, false, nil) - if len(rm) == 0 { - return projectData{}, nil - } - - if ctx.Loggers.Verbose { - ctx.Loggers.Err.Println("dep: Building dependency graph...") - } - // Exclude stdlib imports from the list returned from Flatten(). - const omitStdlib = false - for _, ip := range rm.Flatten(omitStdlib) { - pr, err := sm.DeduceProjectRoot(ip) - if err != nil { - return projectData{}, errors.Wrap(err, "sm.DeduceProjectRoot") // TODO: Skip and report ? - } - - packages[ip] = true - if _, has := dependencies[pr]; has { - dependencies[pr] = append(dependencies[pr], ip) - continue - } - syncDepGroup.Add(1) - go syncDep(pr, sm) - - if ctx.Loggers.Verbose { - ctx.Loggers.Err.Printf("dep: Found import of %q, analyzing...\n", ip) - } - - dependencies[pr] = []string{ip} - v, err := ctx.VersionInWorkspace(pr) - if err != nil { - notondisk[pr] = true - if ctx.Loggers.Verbose { - ctx.Loggers.Err.Printf("dep: Could not determine version for %q, omitting from generated manifest\n", pr) - } - continue - } - - ondisk[pr] = v - constraints[pr] = getProjectPropertiesFromVersion(v) - - feedback(v, pr, fb.DepTypeDirect, ctx) - } - - if ctx.Loggers.Verbose { - ctx.Loggers.Err.Printf("dep: Analyzing transitive imports...\n") - } - // Explore the packages we've found for transitive deps, either - // completing the lock or identifying (more) missing projects that we'll - // need to ask gps to solve for us. - colors := make(map[string]uint8) - const ( - white uint8 = iota - grey - black - ) - - // cache of PackageTrees, so we don't parse projects more than once - ptrees := make(map[gps.ProjectRoot]pkgtree.PackageTree) - - // depth-first traverser - var dft func(string) error - dft = func(pkg string) error { - switch colors[pkg] { - case white: - if ctx.Loggers.Verbose { - ctx.Loggers.Err.Printf("dep: Analyzing %q...\n", pkg) - } - colors[pkg] = grey - - pr, err := sm.DeduceProjectRoot(pkg) - if err != nil { - return errors.Wrap(err, "could not deduce project root for "+pkg) - } - - // We already visited this project root earlier via some other - // pkg within it, and made the decision that it's not on disk. - // Respect that decision, and pop the stack. 
- if notondisk[pr] { - colors[pkg] = black - return nil - } - - ptree, has := ptrees[pr] - if !has { - // It's fine if the root does not exist - it indicates that this - // project is not present in the workspace, and so we need to - // solve to deal with this dep. - r := filepath.Join(ctx.GOPATH, "src", string(pr)) - fi, err := os.Stat(r) - if os.IsNotExist(err) || !fi.IsDir() { - colors[pkg] = black - notondisk[pr] = true - return nil - } - - // We know the project is on disk; the question is whether we're - // first seeing it here, in the transitive exploration, or if it - // was found in the initial pass on direct imports. We know it's - // the former if there's no entry for it in the ondisk map. - if _, in := ondisk[pr]; !in { - v, err := ctx.VersionInWorkspace(pr) - if err != nil { - // Even if we know it's on disk, errors are still - // possible when trying to deduce version. If we - // encounter such an error, just treat the project as - // not being on disk; the solver will work it out. - colors[pkg] = black - notondisk[pr] = true - return nil - } - ondisk[pr] = v - feedback(v, pr, fb.DepTypeTransitive, ctx) - } - - ptree, err = pkgtree.ListPackages(r, string(pr)) - if err != nil { - // Any error here other than an a nonexistent dir (which - // can't happen because we covered that case above) is - // probably critical, so bail out. - return errors.Wrap(err, "gps.ListPackages") - } - ptrees[pr] = ptree - } - - // Get a reachmap that includes main pkgs (even though importing - // them is an error, what we're checking right now is simply whether - // there's a package with go code present on disk), and does not - // backpropagate errors (again, because our only concern right now - // is package existence). - rm, errmap := ptree.ToReachMap(true, false, false, nil) - reached, ok := rm[pkg] - if !ok { - colors[pkg] = black - // not on disk... - notondisk[pr] = true - return nil - } - if _, ok := errmap[pkg]; ok { - // The package is on disk, but contains some errors. - colors[pkg] = black - return nil - } - - if deps, has := dependencies[pr]; has { - if !contains(deps, pkg) { - dependencies[pr] = append(deps, pkg) - } - } else { - dependencies[pr] = []string{pkg} - syncDepGroup.Add(1) - go syncDep(pr, sm) - } - - // recurse - for _, rpkg := range reached.External { - if isStdLib(rpkg) { - continue - } - - err := dft(rpkg) - if err != nil { - // Bubble up any errors we encounter - return err - } - } - - colors[pkg] = black - case grey: - return errors.Errorf("Import cycle detected on %s", pkg) - } - return nil - } - - // run the depth-first traversal from the set of immediate external - // package imports we found in the current project - for pkg := range packages { - err := dft(pkg) - if err != nil { - return projectData{}, err // already errors.Wrap()'d internally - } - } - - syncDepGroup.Wait() - - pd := projectData{ - constraints: constraints, - dependencies: dependencies, - notondisk: notondisk, - ondisk: ondisk, - } - return pd, nil -} diff --git a/cmd/dep/init_test.go b/cmd/dep/init_test.go deleted file mode 100644 index 1b9c7b6a0d..0000000000 --- a/cmd/dep/init_test.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package main - -import ( - "reflect" - "testing" - - "github.com/golang/dep/internal/gps" -) - -func TestContains(t *testing.T) { - a := []string{"a", "b", "abcd"} - - if !contains(a, "a") { - t.Fatal("expected array to contain 'a'") - } - if contains(a, "d") { - t.Fatal("expected array to not contain 'd'") - } -} - -func TestIsStdLib(t *testing.T) { - tests := map[string]bool{ - "github.com/Sirupsen/logrus": false, - "encoding/json": true, - "golang.org/x/net/context": false, - "net/context": true, - ".": false, - } - - for p, e := range tests { - b := isStdLib(p) - if b != e { - t.Fatalf("%s: expected %t got %t", p, e, b) - } - } -} - -func TestGetProjectPropertiesFromVersion(t *testing.T) { - wantSemver, _ := gps.NewSemverConstraint("^v1.0.0") - cases := []struct { - version, want gps.Constraint - }{ - { - version: gps.NewBranch("foo-branch"), - want: gps.NewBranch("foo-branch"), - }, - { - version: gps.NewVersion("foo-version"), - want: gps.NewVersion("foo-version"), - }, - { - version: gps.NewVersion("v1.0.0"), - want: wantSemver, - }, - { - version: gps.NewBranch("foo-branch").Is("some-revision"), - want: gps.NewBranch("foo-branch"), - }, - { - version: gps.NewVersion("foo-version").Is("some-revision"), - want: gps.NewVersion("foo-version"), - }, - { - version: gps.Revision("some-revision"), - want: nil, - }, - { - version: gps.NewVersion("v1.0.0").Is("some-revision"), - want: wantSemver, - }, - } - - for _, c := range cases { - actualProp := getProjectPropertiesFromVersion(c.version.(gps.Version)) - if !reflect.DeepEqual(c.want, actualProp.Constraint) { - t.Fatalf("Constraints are not as expected: \n\t(GOT) %v\n\t(WNT) %v", actualProp.Constraint, c.want) - } - } -} diff --git a/cmd/dep/integration_test.go b/cmd/dep/integration_test.go index b886596eb4..633c065620 100644 --- a/cmd/dep/integration_test.go +++ b/cmd/dep/integration_test.go @@ -18,31 +18,42 @@ import ( ) func TestIntegration(t *testing.T) { + t.Parallel() + test.NeedsExternalNetwork(t) test.NeedsGit(t) - filepath.Walk(filepath.Join("testdata", "harness_tests"), func(path string, info os.FileInfo, err error) error { - if err != nil { - t.Fatal("error walking filepath") - } + wd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } - wd, err := os.Getwd() - if err != nil { - panic(err) - } + for _, dirName := range []string{ + "harness_tests", + "init_path_tests", + } { + relPath := filepath.Join("testdata", dirName) + filepath.Walk(relPath, func(path string, info os.FileInfo, err error) error { + if err != nil { + t.Fatal("error walking filepath") + } + + if filepath.Base(path) != "testcase.json" { + return nil + } - if filepath.Base(path) == "testcase.json" { parse := strings.Split(path, string(filepath.Separator)) testName := strings.Join(parse[2:len(parse)-1], "/") t.Run(testName, func(t *testing.T) { t.Parallel() - t.Run("external", testIntegration(testName, wd, true, execCmd)) - t.Run("internal", testIntegration(testName, wd, false, runMain)) + t.Run("external", testIntegration(testName, relPath, wd, true, execCmd)) + t.Run("internal", testIntegration(testName, relPath, wd, false, runMain)) }) - } - return nil - }) + + return nil + }) + } } // execCmd is a test.RunFunc which runs the program in another process. 
@@ -80,13 +91,13 @@ func runMain(prog string, args []string, stdout, stderr io.Writer, dir string, e
	return
}

-func testIntegration(name, wd string, externalProc bool, run test.RunFunc) func(t *testing.T) {
+// testIntegration runs the test specified by <wd>/<relPath>/<name>/testcase.json
+func testIntegration(name, relPath, wd string, externalProc bool, run test.RunFunc) func(t *testing.T) {
	return func(t *testing.T) {
		t.Parallel()

		// Set up environment
-		testCase := test.NewTestCase(t, name, wd)
-		defer testCase.Cleanup()
+		testCase := test.NewTestCase(t, filepath.Join(wd, relPath), name)
		testProj := test.NewTestProject(t, testCase.InitialPath(), wd, externalProc, run)
		defer testProj.Cleanup()

@@ -119,12 +130,18 @@ func testIntegration(name, wd string, externalProc bool, run test.RunFunc) func(
		// Check output
		testCase.CompareOutput(testProj.GetStdout())

-		// Check final manifest and lock
-		testCase.CompareFile(dep.ManifestName, testProj.ProjPath(dep.ManifestName))
-		testCase.CompareFile(dep.LockName, testProj.ProjPath(dep.LockName))
-
		// Check vendor paths
		testProj.CompareImportPaths()
		testCase.CompareVendorPaths(testProj.GetVendorPaths())
+
+		if *test.UpdateGolden {
+			// Update manifest and lock
+			testCase.UpdateFile(dep.ManifestName, testProj.ProjPath(dep.ManifestName))
+			testCase.UpdateFile(dep.LockName, testProj.ProjPath(dep.LockName))
+		} else {
+			// Check final manifest and lock
+			testCase.CompareFile(dep.ManifestName, testProj.ProjPath(dep.ManifestName))
+			testCase.CompareFile(dep.LockName, testProj.ProjPath(dep.LockName))
+		}
	}
}
diff --git a/cmd/dep/main.go b/cmd/dep/main.go
index ae4cf3ab94..dfd960abf5 100644
--- a/cmd/dep/main.go
+++ b/cmd/dep/main.go
@@ -60,7 +60,6 @@ func (c *Config) Run() (exitCode int) {
		&initCommand{},
		&statusCommand{},
		&ensureCommand{},
-		&removeCommand{},
		&hashinCommand{},
		&pruneCommand{},
	}
@@ -120,6 +119,7 @@ func (c *Config) Run() (exitCode int) {
		if cmd.Name() == cmdName {
			// Build flag set with global flags in there.
			fs := flag.NewFlagSet(cmdName, flag.ContinueOnError)
+			fs.SetOutput(c.Stderr)
			verbose := fs.Bool("v", false, "enable verbose logging")

			// Register the subcommand flags in there, too.
@@ -135,8 +135,9 @@ func (c *Config) Run() (exitCode int) {
			}

			// Parse the flags the user gave us.
+			// The flag package automatically prints usage and an error message
+			// when err != nil or the '-h' flag is provided.
			if err := fs.Parse(c.Args[2:]); err != nil {
-				fs.Usage()
				exitCode = 1
				return
			}
diff --git a/cmd/dep/prune.go b/cmd/dep/prune.go
index cd59655160..89e61403fc 100644
--- a/cmd/dep/prune.go
+++ b/cmd/dep/prune.go
@@ -7,7 +7,7 @@ package main
import (
	"bytes"
	"flag"
-	"fmt"
+	"log"

	"github.com/golang/dep"
	"github.com/golang/dep/internal/gps"
@@ -36,7 +36,7 @@ func (cmd *pruneCommand) Register(fs *flag.FlagSet) {
}

func (cmd *pruneCommand) Run(ctx *dep.Ctx, args []string) error {
-	p, err := ctx.LoadProject("")
+	p, err := ctx.LoadProject()
	if err != nil {
		return err
	}
@@ -68,9 +68,17 @@ func (cmd *pruneCommand) Run(ctx *dep.Ctx, args []string) error {
		return errors.Wrap(err, "could not set up solver for input hashing")
	}

-	if !bytes.Equal(s.HashInputs(), p.Lock.Memo) {
-		return fmt.Errorf("lock hash doesn't match")
+	if p.Lock == nil {
+		return errors.Errorf("Gopkg.lock must exist for prune to know what files are safe to remove.")
	}

-	return dep.PruneProject(p, sm)
+	if !bytes.Equal(s.HashInputs(), p.Lock.SolveMeta.InputsDigest) {
+		return errors.Errorf("Gopkg.lock is out of sync; run dep ensure before pruning.")
+	}
+
+	var pruneLogger *log.Logger
+	if ctx.Loggers.Verbose {
+		pruneLogger = ctx.Loggers.Err
+	}
+	return dep.PruneProject(p, sm, pruneLogger)
}
diff --git a/cmd/dep/remove.go b/cmd/dep/remove.go
deleted file mode 100644
index e53e68e7da..0000000000
--- a/cmd/dep/remove.go
+++ /dev/null
@@ -1,189 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import (
-	"flag"
-	"strings"
-
-	"github.com/golang/dep"
-	"github.com/golang/dep/internal/gps"
-	"github.com/golang/dep/internal/gps/pkgtree"
-	"github.com/pkg/errors"
-)
-
-const removeShortHelp = `Remove a dependency from the project`
-const removeLongHelp = `
-Remove a dependency from the project's lock file, and vendor
-folder. If the project includes that dependency in its import graph, remove will
-fail unless -force is specified.
-` - -func (cmd *removeCommand) Name() string { return "remove" } -func (cmd *removeCommand) Args() string { return "[spec...]" } -func (cmd *removeCommand) ShortHelp() string { return removeShortHelp } -func (cmd *removeCommand) LongHelp() string { return removeLongHelp } -func (cmd *removeCommand) Hidden() bool { return false } - -func (cmd *removeCommand) Register(fs *flag.FlagSet) { - fs.BoolVar(&cmd.dryRun, "n", false, "dry run, don't actually remove anything") - fs.BoolVar(&cmd.unused, "unused", false, "remove all dependencies that aren't imported by the project") - fs.BoolVar(&cmd.force, "force", false, "remove the given dependencies even if they are imported by the project") - fs.BoolVar(&cmd.keepSource, "keep-source", false, "don't remove source code") -} - -type removeCommand struct { - dryRun bool - unused bool - force bool - keepSource bool -} - -func (cmd *removeCommand) Run(ctx *dep.Ctx, args []string) error { - p, err := ctx.LoadProject("") - if err != nil { - return err - } - - sm, err := ctx.SourceManager() - if err != nil { - return err - } - sm.UseDefaultSignalHandling() - defer sm.Release() - - cpr, err := ctx.SplitAbsoluteProjectRoot(p.AbsRoot) - if err != nil { - return errors.Wrap(err, "determineProjectRoot") - } - - pkgT, err := pkgtree.ListPackages(p.AbsRoot, cpr) - if err != nil { - return errors.Wrap(err, "gps.ListPackages") - } - - reachmap, _ := pkgT.ToReachMap(true, true, false, nil) - - if cmd.unused { - if len(args) > 0 { - return errors.Errorf("remove takes no arguments when running with -unused") - } - - reachlist := reachmap.Flatten(false) - - // warm the cache in parallel, in case any paths require go get metadata - // discovery - for _, im := range reachlist { - go sm.DeduceProjectRoot(im) - } - - otherroots := make(map[gps.ProjectRoot]bool) - for _, im := range reachlist { - if isStdLib(im) { - continue - } - pr, err := sm.DeduceProjectRoot(im) - if err != nil { - // not being able to detect the root for an import path that's - // actually in the import list is a deeper problem. However, - // it's not our direct concern here, so we just warn. - ctx.Loggers.Err.Printf("dep: could not infer root for %q\n", pr) - continue - } - otherroots[pr] = true - } - - var rm []gps.ProjectRoot - for pr := range p.Manifest.Dependencies { - if _, has := otherroots[pr]; !has { - delete(p.Manifest.Dependencies, pr) - rm = append(rm, pr) - } - } - - if len(rm) == 0 { - ctx.Loggers.Err.Println("dep: nothing to do") - return nil - } - } else { - // warm the cache in parallel, in case any paths require go get metadata - // discovery - for _, arg := range args { - go sm.DeduceProjectRoot(arg) - } - - for _, arg := range args { - pr, err := sm.DeduceProjectRoot(arg) - if err != nil { - // couldn't detect the project root for this string - - // a non-valid project root was provided - return errors.Wrap(err, "gps.DeduceProjectRoot") - } - if string(pr) != arg { - // don't be magical with subpaths, otherwise we muddy the waters - // between project roots and import paths - return errors.Errorf("%q is not a project root, but %q is - is that what you want to remove?", arg, pr) - } - - /* - * - Remove package from manifest - * - if the package IS NOT being used, solving should do what we want - * - if the package IS being used: - * - Desired behavior: stop and tell the user, unless --force - * - Actual solver behavior: ? 
- */ - var pkgimport []string - for pkg, ie := range reachmap { - for _, im := range ie.External { - if hasImportPathPrefix(im, arg) { - pkgimport = append(pkgimport, pkg) - break - } - } - } - - if _, indeps := p.Manifest.Dependencies[gps.ProjectRoot(arg)]; !indeps { - return errors.Errorf("%q is not present in the manifest, cannot remove it", arg) - } - - if len(pkgimport) > 0 && !cmd.force { - if len(pkgimport) == 1 { - return errors.Errorf("not removing %q because it is imported by %q (pass -force to override)", arg, pkgimport[0]) - } - return errors.Errorf("not removing %q because it is imported by:\n\t%s (pass -force to override)", arg, strings.Join(pkgimport, "\n\t")) - } - - delete(p.Manifest.Dependencies, gps.ProjectRoot(arg)) - } - } - - params := p.MakeParams() - params.RootPackageTree = pkgT - - if ctx.Loggers.Verbose { - params.TraceLogger = ctx.Loggers.Err - } - s, err := gps.Prepare(params, sm) - if err != nil { - return errors.Wrap(err, "prepare solver") - } - - soln, err := s.Solve() - if err != nil { - handleAllTheFailuresOfTheWorld(err) - return err - } - - newLock := dep.LockFromInterface(soln) - - sw, err := dep.NewSafeWriter(nil, p.Lock, newLock, dep.VendorOnChanged) - if err != nil { - return err - } - if err := sw.Write(p.AbsRoot, sm, true); err != nil { - return errors.Wrap(err, "grouped write of manifest, lock and vendor") - } - return nil -} diff --git a/cmd/dep/remove_test.go b/cmd/dep/remove_test.go deleted file mode 100644 index 881ccd946c..0000000000 --- a/cmd/dep/remove_test.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "os" - "testing" - - "github.com/golang/dep/internal/test" -) - -func TestRemoveErrors(t *testing.T) { - test.NeedsExternalNetwork(t) - test.NeedsGit(t) - - testName := "remove/unused/case1" - wd, err := os.Getwd() - if err != nil { - panic(err) - } - - t.Run(testName+"/external", removeErrors(testName, wd, true, execCmd)) - t.Run(testName+"/internal", removeErrors(testName, wd, false, runMain)) -} - -func removeErrors(name, wd string, externalProc bool, run test.RunFunc) func(*testing.T) { - return func(t *testing.T) { - testCase := test.NewTestCase(t, name, wd) - testProj := test.NewTestProject(t, testCase.InitialPath(), wd, externalProc, run) - defer testProj.Cleanup() - - // Create and checkout the vendor revisions - for ip, rev := range testCase.VendorInitial { - testProj.GetVendorGit(ip) - testProj.RunGit(testProj.VendorPath(ip), "checkout", rev) - } - - // Create and checkout the import revisions - for ip, rev := range testCase.GopathInitial { - testProj.RunGo("get", ip) - testProj.RunGit(testProj.Path("src", ip), "checkout", rev) - } - - if err := testProj.DoRun([]string{"remove", "-unused", "github.com/not/used"}); err == nil { - t.Fatal("rm with both -unused and arg should have failed") - } - - if err := testProj.DoRun([]string{"remove", "github.com/not/present"}); err == nil { - t.Fatal("rm with arg not in manifest should have failed") - } - - if err := testProj.DoRun([]string{"remove", "github.com/not/used", "github.com/not/present"}); err == nil { - t.Fatal("rm with one arg not in manifest should have failed") - } - - if err := testProj.DoRun([]string{"remove", "github.com/sdboyer/deptest"}); err == nil { - t.Fatal("rm of arg in manifest and imports should have failed without -force") - } - } -} diff --git a/cmd/dep/root_analyzer.go 
b/cmd/dep/root_analyzer.go
new file mode 100644
index 0000000000..9868cede4e
--- /dev/null
+++ b/cmd/dep/root_analyzer.go
@@ -0,0 +1,210 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"encoding/hex"
+	"io/ioutil"
+	"log"
+
+	"github.com/golang/dep"
+	fb "github.com/golang/dep/internal/feedback"
+	"github.com/golang/dep/internal/gps"
+	"github.com/pkg/errors"
+)
+
+// importer handles importing configuration from other dependency managers into
+// the dep configuration format.
+type importer interface {
+	Name() string
+	Import(path string, pr gps.ProjectRoot) (*dep.Manifest, *dep.Lock, error)
+	HasDepMetadata(dir string) bool
+}
+
+// rootAnalyzer supplies manifest/lock data from both dep and external tools'
+// configuration files.
+// * When used on the root project, it imports only from external tools.
+// * When used by the solver for dependencies, it first looks for dep config,
+//   then external tools.
type rootAnalyzer struct {
+	skipTools  bool
+	ctx        *dep.Ctx
+	sm         gps.SourceManager
+	directDeps map[string]bool
+}
+
+func newRootAnalyzer(skipTools bool, ctx *dep.Ctx, directDeps map[string]bool, sm gps.SourceManager) *rootAnalyzer {
+	return &rootAnalyzer{
+		skipTools:  skipTools,
+		ctx:        ctx,
+		sm:         sm,
+		directDeps: directDeps,
+	}
+}
+
+func (a *rootAnalyzer) InitializeRootManifestAndLock(dir string, pr gps.ProjectRoot) (rootM *dep.Manifest, rootL *dep.Lock, err error) {
+	if !a.skipTools {
+		rootM, rootL, err = a.importManifestAndLock(dir, pr, false)
+		if err != nil {
+			return
+		}
+	}
+
+	if rootM == nil {
+		rootM = &dep.Manifest{
+			Constraints: make(gps.ProjectConstraints),
+			Ovr:         make(gps.ProjectConstraints),
+		}
+	}
+	if rootL == nil {
+		rootL = &dep.Lock{}
+	}
+
+	return
+}
+
+func (a *rootAnalyzer) importManifestAndLock(dir string, pr gps.ProjectRoot, suppressLogs bool) (*dep.Manifest, *dep.Lock, error) {
+	logger := a.ctx.Err
+	if suppressLogs {
+		logger = log.New(ioutil.Discard, "", 0)
+	}
+
+	importers := []importer{
+		newGlideImporter(logger, a.ctx.Verbose, a.sm),
+	}
+
+	for _, i := range importers {
+		if i.HasDepMetadata(dir) {
+			a.ctx.Loggers.Err.Printf("Importing configuration from %s. These are only initial constraints, and are further refined during the solve process.", i.Name())
+			m, l, err := i.Import(dir, pr)
+			a.removeTransitiveDependencies(m)
+			return m, l, err
+		}
+	}
+
+	var emptyManifest = &dep.Manifest{Constraints: make(gps.ProjectConstraints), Ovr: make(gps.ProjectConstraints)}
+	return emptyManifest, nil, nil
+}
+
+func (a *rootAnalyzer) removeTransitiveDependencies(m *dep.Manifest) {
+	for pr := range m.Constraints {
+		if _, isDirect := a.directDeps[string(pr)]; !isDirect {
+			delete(m.Constraints, pr)
+		}
+	}
+}
+
+// DeriveManifestAndLock evaluates a dependency for existing dependency manager
+// configuration (ours or external) and passes any configuration found back
+// to the solver.
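+// Dep's own metadata always wins: a dependency carrying dep configuration is
+// never run through the external importers, and those importers are consulted
+// only when -skip-tools was not set.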
+func (a *rootAnalyzer) DeriveManifestAndLock(dir string, pr gps.ProjectRoot) (gps.Manifest, gps.Lock, error) { + // Ignore other tools if we find dep configuration + var depAnalyzer dep.Analyzer + if depAnalyzer.HasDepMetadata(dir) { + return depAnalyzer.DeriveManifestAndLock(dir, pr) + } + + if !a.skipTools { + // The assignment back to an interface prevents interface-based nil checks from failing later + var manifest gps.Manifest = gps.SimpleManifest{} + var lock gps.Lock + im, il, err := a.importManifestAndLock(dir, pr, true) + if im != nil { + manifest = im + } + if il != nil { + lock = il + } + return manifest, lock, err + } + + return gps.SimpleManifest{}, nil, nil +} + +func (a *rootAnalyzer) FinalizeRootManifestAndLock(m *dep.Manifest, l *dep.Lock) { + // Remove dependencies from the manifest that aren't used + for pr := range m.Constraints { + var used bool + for _, y := range l.Projects() { + if pr == y.Ident().ProjectRoot { + used = true + break + } + } + if !used { + delete(m.Constraints, pr) + } + } +} + +func (a *rootAnalyzer) Info() (string, int) { + name := "dep" + version := 1 + if !a.skipTools { + name = "dep+import" + } + return name, version +} + +// feedback logs project constraint as feedback to the user. +func feedback(v gps.Version, pr gps.ProjectRoot, depType string, logger *log.Logger) { + rev, version, branch := gps.VersionComponentStrings(v) + + // Check if it's a valid SHA1 digest and trim to 7 characters. + if len(rev) == 40 { + if _, err := hex.DecodeString(rev); err == nil { + // Valid SHA1 digest + rev = rev[0:7] + } + } + + // Get LockedVersion + var ver string + if version != "" { + ver = version + } else if branch != "" { + ver = branch + } + + cf := &fb.ConstraintFeedback{ + LockedVersion: ver, + Revision: rev, + ProjectPath: string(pr), + DependencyType: depType, + } + + // Get non-revision constraint if available + if c := getProjectPropertiesFromVersion(v).Constraint; c != nil { + cf.Version = c.String() + } + + // Attach ConstraintType for direct/imported deps based on locked version + if cf.DependencyType == fb.DepTypeDirect || cf.DependencyType == fb.DepTypeImported { + if cf.LockedVersion != "" { + cf.ConstraintType = fb.ConsTypeConstraint + } else { + cf.ConstraintType = fb.ConsTypeHint + } + } + + cf.LogFeedback(logger) +} + +func lookupVersionForRevision(rev gps.Revision, pi gps.ProjectIdentifier, sm gps.SourceManager) (gps.Version, error) { + // Find the version that goes with this revision, if any + versions, err := sm.ListVersions(pi) + if err != nil { + return nil, errors.Wrapf(err, "Unable to list versions for %s(%s)", pi.ProjectRoot, pi.Source) + } + + gps.SortPairedForUpgrade(versions) // Sort versions in asc order + for _, v := range versions { + if v.Underlying() == rev { + return v, nil + } + } + + return rev, nil +} diff --git a/cmd/dep/root_analyzer_test.go b/cmd/dep/root_analyzer_test.go new file mode 100644 index 0000000000..baca305d9f --- /dev/null +++ b/cmd/dep/root_analyzer_test.go @@ -0,0 +1,21 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import "testing" + +func TestRootAnalyzer_Info(t *testing.T) { + testCases := map[bool]string{ + true: "dep", + false: "dep+import", + } + for skipTools, want := range testCases { + a := rootAnalyzer{skipTools: skipTools} + got, _ := a.Info() + if got != want { + t.Errorf("Expected the name of the importer with skipTools=%t to be '%s', got '%s'", skipTools, want, got) + } + } +} diff --git a/cmd/dep/status.go b/cmd/dep/status.go index 57d5440a50..52341431c0 100644 --- a/cmd/dep/status.go +++ b/cmd/dep/status.go @@ -15,6 +15,7 @@ import ( "github.com/golang/dep" "github.com/golang/dep/internal/gps" + "github.com/golang/dep/internal/gps/paths" "github.com/golang/dep/internal/gps/pkgtree" "github.com/pkg/errors" ) @@ -164,7 +165,7 @@ func (out *dotOutput) BasicHeader() { ptree, _ := pkgtree.ListPackages(out.p.AbsRoot, string(out.p.ImportRoot)) prm, _ := ptree.ToReachMap(true, false, false, nil) - out.g.createNode(string(out.p.ImportRoot), "", prm.Flatten(false)) + out.g.createNode(string(out.p.ImportRoot), "", prm.FlattenFn(paths.IsStandardImportPath)) } func (out *dotOutput) BasicFooter() { @@ -181,7 +182,7 @@ func (out *dotOutput) MissingLine(ms *MissingStatus) {} func (out *dotOutput) MissingFooter() {} func (cmd *statusCommand) Run(ctx *dep.Ctx, args []string) error { - p, err := ctx.LoadProject("") + p, err := ctx.LoadProject() if err != nil { return err } @@ -214,11 +215,24 @@ func (cmd *statusCommand) Run(ctx *dep.Ctx, args []string) error { } } - if err := runStatusAll(ctx.Loggers, out, p, sm); err != nil { + digestMismatch, hasMissingPkgs, err := runStatusAll(ctx.Loggers, out, p, sm) + if err != nil { return err } - ctx.Loggers.Out.Print(buf.String()) + if digestMismatch { + if hasMissingPkgs { + ctx.Loggers.Err.Println("Lock inputs-digest mismatch due to the following packages missing from the lock:\n") + ctx.Loggers.Out.Print(buf.String()) + ctx.Loggers.Err.Println("\nThis happens when a new import is added. Run `dep ensure` to install the missing packages.") + } else { + ctx.Loggers.Err.Printf("Lock inputs-digest mismatch. This happens when Gopkg.toml is modified.\n" + + "Run `dep ensure` to regenerate the inputs-digest.") + } + } else { + ctx.Loggers.Out.Print(buf.String()) + } + return nil } @@ -239,17 +253,19 @@ type MissingStatus struct { MissingPackages []string } -func runStatusAll(loggers *dep.Loggers, out outputter, p *dep.Project, sm gps.SourceManager) error { +func runStatusAll(loggers *dep.Loggers, out outputter, p *dep.Project, sm gps.SourceManager) (bool, bool, error) { + var digestMismatch, hasMissingPkgs bool + if p.Lock == nil { // TODO if we have no lock file, do...other stuff - return nil + return digestMismatch, hasMissingPkgs, nil } // While the network churns on ListVersions() requests, statically analyze // code from the current project. ptree, err := pkgtree.ListPackages(p.AbsRoot, string(p.ImportRoot)) if err != nil { - return errors.Errorf("analysis of local packages failed: %v", err) + return digestMismatch, hasMissingPkgs, errors.Errorf("analysis of local packages failed: %v", err) } // Set up a solver in order to check the InputHash. 
@@ -266,7 +282,7 @@ func runStatusAll(loggers *dep.Loggers, out outputter, p *dep.Project, sm gps.So s, err := gps.Prepare(params, sm) if err != nil { - return errors.Errorf("could not set up solver for input hashing: %s", err) + return digestMismatch, hasMissingPkgs, errors.Errorf("could not set up solver for input hashing: %s", err) } cm := collectConstraints(ptree, p, sm) @@ -277,7 +293,7 @@ func runStatusAll(loggers *dep.Loggers, out outputter, p *dep.Project, sm gps.So slp := p.Lock.Projects() sort.Sort(dep.SortedLockedProjects(slp)) - if bytes.Equal(s.HashInputs(), p.Lock.Memo) { + if bytes.Equal(s.HashInputs(), p.Lock.SolveMeta.InputsDigest) { // If these are equal, we're guaranteed that the lock is a transitively // complete picture of all deps. That eliminates the need for at least // some checks. @@ -297,11 +313,11 @@ func runStatusAll(loggers *dep.Loggers, out outputter, p *dep.Project, sm gps.So ptr, err := sm.ListPackages(proj.Ident(), proj.Version()) if err != nil { - return fmt.Errorf("analysis of %s package failed: %v", proj.Ident().ProjectRoot, err) + return digestMismatch, hasMissingPkgs, fmt.Errorf("analysis of %s package failed: %v", proj.Ident().ProjectRoot, err) } prm, _ := ptr.ToReachMap(true, false, false, nil) - bs.Children = prm.Flatten(false) + bs.Children = prm.FlattenFn(paths.IsStandardImportPath) } // Split apart the version from the lock into its constituent parts @@ -330,7 +346,7 @@ func runStatusAll(loggers *dep.Loggers, out outputter, p *dep.Project, sm gps.So // Only if we have a non-rev and non-plain version do/can we display // anything wrt the version's updateability. if bs.Version != nil && bs.Version.Type() != gps.IsVersion { - c, has := p.Manifest.Dependencies[proj.Ident().ProjectRoot] + c, has := p.Manifest.Constraints[proj.Ident().ProjectRoot] if !has { c.Constraint = gps.Any() } @@ -359,7 +375,7 @@ func runStatusAll(loggers *dep.Loggers, out outputter, p *dep.Project, sm gps.So } out.BasicFooter() - return nil + return digestMismatch, hasMissingPkgs, nil } // Hash digest mismatch may indicate that some deps are no longer @@ -368,9 +384,10 @@ func runStatusAll(loggers *dep.Loggers, out outputter, p *dep.Project, sm gps.So // // It's possible for digests to not match, but still have a correct // lock. 
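+	// A mismatch alone doesn't tell us which case we're in, so re-derive the
+	// current import graph below and check whether any of its packages are
+	// absent from the lock.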
+ digestMismatch = true rm, _ := ptree.ToReachMap(true, true, false, nil) - external := rm.Flatten(false) + external := rm.FlattenFn(paths.IsStandardImportPath) roots := make(map[gps.ProjectRoot][]string, len(external)) type fail struct { @@ -400,7 +417,7 @@ func runStatusAll(loggers *dep.Loggers, out outputter, p *dep.Project, sm gps.So loggers.Err.Printf("\t%s: %s\n", fail.ex, fail.err.Error()) } - return errors.New("address issues with undeducable import paths to get more status information.") + return digestMismatch, hasMissingPkgs, errors.New("address issues with undeducible import paths to get more status information") } out.MissingHeader() @@ -415,11 +432,12 @@ outer: } } + hasMissingPkgs = true out.MissingLine(&MissingStatus{ProjectRoot: string(root), MissingPackages: pkgs}) } out.MissingFooter() - return nil + return digestMismatch, hasMissingPkgs, nil } func formatVersion(v gps.Version) string { diff --git a/cmd/dep/status_test.go b/cmd/dep/status_test.go index ca54cb0b73..87c66f3021 100644 --- a/cmd/dep/status_test.go +++ b/cmd/dep/status_test.go @@ -11,6 +11,8 @@ import ( ) func TestStatusFormatVersion(t *testing.T) { + t.Parallel() + tests := map[gps.Version]string{ nil: "", gps.NewBranch("master"): "branch master", diff --git a/cmd/dep/testdata/glide/expected_import_output.txt b/cmd/dep/testdata/glide/expected_import_output.txt new file mode 100644 index 0000000000..7b599e9570 --- /dev/null +++ b/cmd/dep/testdata/glide/expected_import_output.txt @@ -0,0 +1,7 @@ +Detected glide configuration files... +Converting from glide.yaml and glide.lock... + Using ^0.8.1 as initial constraint for imported dep github.com/sdboyer/deptest + Trying v0.8.1 (3f4c3be) as initial lock for imported dep github.com/sdboyer/deptest + Using ^2.0.0 as initial constraint for imported dep github.com/sdboyer/deptestdos + Trying v2.0.0 (5c60720) as initial lock for imported dep github.com/sdboyer/deptestdos + Using cb00e56 as initial hint for imported dep github.com/golang/lint diff --git a/cmd/dep/testdata/glide/glide.lock b/cmd/dep/testdata/glide/glide.lock new file mode 100644 index 0000000000..0fae066028 --- /dev/null +++ b/cmd/dep/testdata/glide/glide.lock @@ -0,0 +1,12 @@ +hash: 16053c82a71f9bd509b05a4523df6bc418aed2083e4b8bd97a870bbc003256f8 +updated: 2017-03-07T17:02:32.214383898-06:00 +imports: +- name: github.com/sdboyer/deptest + repo: https://github.com/sdboyer/deptest.git + vcs: git + version: 3f4c3bea144e112a69bbe5d8d01c1b09a544253f +- name: github.com/sdboyer/deptestdos + version: 5c607206be5decd28e6263ffffdcee067266015e +testImports: +- name: github.com/golang/lint + version: cb00e5669539f047b2f4c53a421a01b0c8e172c6 diff --git a/cmd/dep/testdata/glide/glide.yaml b/cmd/dep/testdata/glide/glide.yaml new file mode 100644 index 0000000000..88a3f2be74 --- /dev/null +++ b/cmd/dep/testdata/glide/glide.yaml @@ -0,0 +1,20 @@ +package: github.com/golang/notexist +homepage: http://example.com +license: MIT +owners: +- name: Sam Boyer + email: sdboyer@example.com + homepage: http://sdboyer.io +ignore: +- github.com/sdboyer/dep-test +excludeDirs: +- samples +import: +- package: github.com/sdboyer/deptest + repo: https://github.com/sdboyer/deptest.git + vcs: git + version: master +- package: github.com/sdboyer/deptestdos + version: v2.0.0 +testImport: +- package: github.com/golang/lint diff --git a/cmd/dep/testdata/harness_tests/ensure/empty/case1/final/Gopkg.lock b/cmd/dep/testdata/harness_tests/ensure/empty/case1/final/Gopkg.lock index 2b9c20975d..c7f497e7a1 100644 --- 
a/cmd/dep/testdata/harness_tests/ensure/empty/case1/final/Gopkg.lock
+++ b/cmd/dep/testdata/harness_tests/ensure/empty/case1/final/Gopkg.lock
@@ -1,7 +1,15 @@
-memo = "14b07b05e0f01051b03887ab2bf80b516bc5510ea92f75f76c894b1745d8850c"
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
[[projects]]
  name = "github.com/sdboyer/deptest"
  packages = ["."]
  revision = "ff2948a2ac8f538c4ecd55962e919d1e13e74baf"
  version = "v1.0.0"
+
+[solve-meta]
+  analyzer-name = "dep"
+  analyzer-version = 1
+  inputs-digest = "14b07b05e0f01051b03887ab2bf80b516bc5510ea92f75f76c894b1745d8850c"
+  solver-name = "gps-cdcl"
+  solver-version = 1
diff --git a/cmd/dep/testdata/harness_tests/ensure/empty/case1/final/Gopkg.toml b/cmd/dep/testdata/harness_tests/ensure/empty/case1/final/Gopkg.toml
index 26987273ec..1bfe549576 100644
--- a/cmd/dep/testdata/harness_tests/ensure/empty/case1/final/Gopkg.toml
+++ b/cmd/dep/testdata/harness_tests/ensure/empty/case1/final/Gopkg.toml
@@ -1,6 +1,13 @@
## Gopkg.toml example (these lines may be deleted)

+## "metadata" defines metadata about the project that could be used by other independent
+## systems. The metadata defined here will be ignored by dep.
+# [metadata]
+# key1 = "value that conveys data to other systems"
+# system1-data = "value that is used by a system"
+# system2-data = "value that is used by another system"
+
## "required" lists a set of packages (not projects) that must be included in
## Gopkg.lock. This list is merged with the set of packages imported by the current
## project. Use it when your project needs a package it doesn't explicitly import -
@@ -12,9 +19,10 @@
## or in a dependency.
# ignored = ["github.com/user/project/badpkg"]

-## Dependencies define constraints on dependent projects. They are respected by
+## Constraints are rules for how directly imported projects
+## may be incorporated into the depgraph. They are respected by
## dep whether coming from the Gopkg.toml of the current project or a dependency.
-# [[dependencies]]
+# [[constraint]]
## Required: the root import path of the project being constrained.
# name = "github.com/user/project"
#
@@ -26,18 +34,25 @@
#
## Optional: an alternate location (URL or import path) for the project's source.
# source = "https://github.com/myfork/package.git"
+#
+## "metadata" defines metadata about the dependency or override that could be used
+## by other independent systems. The metadata defined here will be ignored by dep.
+# [metadata]
+# key1 = "value that conveys data to other systems"
+# system1-data = "value that is used by a system"
+# system2-data = "value that is used by another system"

-## Overrides have the same structure as [[dependencies]], but supercede all
-## [[dependencies]] declarations from all projects. Only the current project's
-## [[overrides]] are applied.
+## Overrides have the same structure as [[constraint]], but supersede all
+## [[constraint]] declarations from all projects. Only the current project's
+## [[override]] declarations are applied.
##
## Overrides are a sledgehammer. Use them only as a last resort.
-# [[overrides]]
+# [[override]]
## Required: the root import path of the project being constrained.
# name = "github.com/user/project"
#
## Optional: specifying a version constraint override will cause all other
-## constraints on this project to be ignored; only the overriden constraint
+## constraints on this project to be ignored; only the overridden constraint
## need be satisfied.
## Again, only one of "branch", "version" or "revision" can be specified. # version = "1.0.0" @@ -51,6 +66,6 @@ -[[dependencies]] +[[constraint]] name = "github.com/sdboyer/deptest" - version = "^1.0.0" + version = "1.0.0" diff --git a/cmd/dep/testdata/harness_tests/ensure/empty/case1/testcase.json b/cmd/dep/testdata/harness_tests/ensure/empty/case1/testcase.json index 515a342de3..620bc4e060 100644 --- a/cmd/dep/testdata/harness_tests/ensure/empty/case1/testcase.json +++ b/cmd/dep/testdata/harness_tests/ensure/empty/case1/testcase.json @@ -1,8 +1,9 @@ { "commands": [ - ["init"], + ["init", "-skip-tools"], ["ensure", "-update"] ], + "error-expected": "", "vendor-final": [ "github.com/sdboyer/deptest" ] diff --git a/cmd/dep/testdata/harness_tests/ensure/empty/case2/final/Gopkg.lock b/cmd/dep/testdata/harness_tests/ensure/empty/case2/final/Gopkg.lock index 3c588e4d62..a783451f80 100644 --- a/cmd/dep/testdata/harness_tests/ensure/empty/case2/final/Gopkg.lock +++ b/cmd/dep/testdata/harness_tests/ensure/empty/case2/final/Gopkg.lock @@ -1,7 +1,15 @@ -memo = "e7725ea56516a42a641aaaf5d48754258d9f3c59949cb8a0e8a21b1ab6e07179" +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + [[projects]] name = "github.com/sdboyer/deptest" packages = ["."] revision = "ff2948a2ac8f538c4ecd55962e919d1e13e74baf" version = "v1.0.0" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "e7725ea56516a42a641aaaf5d48754258d9f3c59949cb8a0e8a21b1ab6e07179" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/cmd/dep/testdata/harness_tests/ensure/empty/case2/final/Gopkg.toml b/cmd/dep/testdata/harness_tests/ensure/empty/case2/final/Gopkg.toml index d327c51ade..532da96a0d 100644 --- a/cmd/dep/testdata/harness_tests/ensure/empty/case2/final/Gopkg.toml +++ b/cmd/dep/testdata/harness_tests/ensure/empty/case2/final/Gopkg.toml @@ -1,3 +1,3 @@ -[[dependencies]] +[[constraint]] name = "github.com/sdboyer/deptest" version = "~0.8.0" \ No newline at end of file diff --git a/cmd/dep/testdata/harness_tests/ensure/empty/case2/initial/Gopkg.toml b/cmd/dep/testdata/harness_tests/ensure/empty/case2/initial/Gopkg.toml index d327c51ade..532da96a0d 100644 --- a/cmd/dep/testdata/harness_tests/ensure/empty/case2/initial/Gopkg.toml +++ b/cmd/dep/testdata/harness_tests/ensure/empty/case2/initial/Gopkg.toml @@ -1,3 +1,3 @@ -[[dependencies]] +[[constraint]] name = "github.com/sdboyer/deptest" version = "~0.8.0" \ No newline at end of file diff --git a/cmd/dep/testdata/harness_tests/ensure/empty/case2/testcase.json b/cmd/dep/testdata/harness_tests/ensure/empty/case2/testcase.json index 19da527090..729de9d0f4 100644 --- a/cmd/dep/testdata/harness_tests/ensure/empty/case2/testcase.json +++ b/cmd/dep/testdata/harness_tests/ensure/empty/case2/testcase.json @@ -2,6 +2,7 @@ "commands": [ ["ensure"] ], + "error-expected": "", "vendor-final": [ "github.com/sdboyer/deptest" ] diff --git a/cmd/dep/testdata/harness_tests/ensure/empty/case3/final/Gopkg.lock b/cmd/dep/testdata/harness_tests/ensure/empty/case3/final/Gopkg.lock index 4fc4f785c5..d2153e3747 100644 --- a/cmd/dep/testdata/harness_tests/ensure/empty/case3/final/Gopkg.lock +++ b/cmd/dep/testdata/harness_tests/ensure/empty/case3/final/Gopkg.lock @@ -1,7 +1,15 @@ -memo = "e5c16e09ed6f0a1a2b3cf472c34b7fd50861dd070e81d5e623f72e8173f0c065" +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
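An aside on the runStatusAll hunk earlier in this changeset: the function now surfaces two soft outcomes (digestMismatch, hasMissingPkgs) alongside the hard error, so the caller can distinguish "lock is stale" from "packages are missing" without parsing output. A minimal, self-contained Go sketch of that calling pattern follows; checkStatus and the messages are illustrative stand-ins, not dep's actual wiring.

package main

import "fmt"

// checkStatus stands in for the reworked runStatusAll: besides a hard error,
// it reports whether the lock's inputs-digest went stale (digestMismatch) and
// whether imported packages are absent from the lock (hasMissingPkgs).
func checkStatus() (digestMismatch, hasMissingPkgs bool, err error) {
	// Analysis elided; pretend the digest comparison failed.
	return true, false, nil
}

func main() {
	digestMismatch, hasMissingPkgs, err := checkStatus()
	switch {
	case err != nil:
		fmt.Println("error:", err) // e.g. undeducible import paths
	case digestMismatch:
		fmt.Println("Gopkg.lock is out of sync with the project; run `dep ensure`")
	case hasMissingPkgs:
		fmt.Println("imported packages are missing from Gopkg.lock")
	default:
		fmt.Println("status is clean")
	}
}

The separate booleans keep the exit-path decision in one place instead of scattering sentinel errors through the analysis loop.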
+
 [[projects]]
   branch = "master"
   name = "github.com/sdboyer/deptest"
   packages = ["."]
   revision = "3f4c3bea144e112a69bbe5d8d01c1b09a544253f"
+
+[solve-meta]
+  analyzer-name = "dep"
+  analyzer-version = 1
+  inputs-digest = "e5c16e09ed6f0a1a2b3cf472c34b7fd50861dd070e81d5e623f72e8173f0c065"
+  solver-name = "gps-cdcl"
+  solver-version = 1
diff --git a/cmd/dep/testdata/harness_tests/ensure/empty/case3/final/Gopkg.toml b/cmd/dep/testdata/harness_tests/ensure/empty/case3/final/Gopkg.toml
index dd0150055a..d77e367c70 100644
--- a/cmd/dep/testdata/harness_tests/ensure/empty/case3/final/Gopkg.toml
+++ b/cmd/dep/testdata/harness_tests/ensure/empty/case3/final/Gopkg.toml
@@ -1,5 +1,5 @@
 ignored = ["github.com/sdboyer/deptestdos"]

-[[dependencies]]
+[[constraint]]
   branch = "master"
   name = "github.com/sdboyer/deptest"
diff --git a/cmd/dep/testdata/harness_tests/ensure/empty/case3/initial/Gopkg.toml b/cmd/dep/testdata/harness_tests/ensure/empty/case3/initial/Gopkg.toml
index dd0150055a..d77e367c70 100644
--- a/cmd/dep/testdata/harness_tests/ensure/empty/case3/initial/Gopkg.toml
+++ b/cmd/dep/testdata/harness_tests/ensure/empty/case3/initial/Gopkg.toml
@@ -1,5 +1,5 @@
 ignored = ["github.com/sdboyer/deptestdos"]

-[[dependencies]]
+[[constraint]]
   branch = "master"
   name = "github.com/sdboyer/deptest"
diff --git a/cmd/dep/testdata/harness_tests/ensure/empty/case3/testcase.json b/cmd/dep/testdata/harness_tests/ensure/empty/case3/testcase.json
index 19da527090..729de9d0f4 100644
--- a/cmd/dep/testdata/harness_tests/ensure/empty/case3/testcase.json
+++ b/cmd/dep/testdata/harness_tests/ensure/empty/case3/testcase.json
@@ -2,6 +2,7 @@
   "commands": [
     ["ensure"]
   ],
+  "error-expected": "",
   "vendor-final": [
     "github.com/sdboyer/deptest"
   ]
diff --git a/cmd/dep/testdata/harness_tests/ensure/override/case1/final/Gopkg.lock b/cmd/dep/testdata/harness_tests/ensure/override/case1/final/Gopkg.lock
index e045b6b22c..ccd3cc61fc 100644
--- a/cmd/dep/testdata/harness_tests/ensure/override/case1/final/Gopkg.lock
+++ b/cmd/dep/testdata/harness_tests/ensure/override/case1/final/Gopkg.lock
@@ -1,7 +1,15 @@
-memo = "8bca9526e654e56e05d9075d1f33fa5b649bf6d58aa7d71ca39e7fbea8468e07"
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
 [[projects]]
   name = "github.com/sdboyer/deptest"
   packages = ["."]
   revision = "ff2948a2ac8f538c4ecd55962e919d1e13e74baf"
   version = "v1.0.0"
+
+[solve-meta]
+  analyzer-name = "dep"
+  analyzer-version = 1
+  inputs-digest = "8bca9526e654e56e05d9075d1f33fa5b649bf6d58aa7d71ca39e7fbea8468e07"
+  solver-name = "gps-cdcl"
+  solver-version = 1
diff --git a/cmd/dep/testdata/harness_tests/ensure/override/case1/final/Gopkg.toml b/cmd/dep/testdata/harness_tests/ensure/override/case1/final/Gopkg.toml
index 26987273ec..1bfe549576 100644
--- a/cmd/dep/testdata/harness_tests/ensure/override/case1/final/Gopkg.toml
+++ b/cmd/dep/testdata/harness_tests/ensure/override/case1/final/Gopkg.toml
@@ -1,6 +1,13 @@
 ## Gopkg.toml example (these lines may be deleted)

+## "metadata" defines metadata about the project that could be used by other independent
+## systems. The metadata defined here will be ignored by dep.
+# [metadata]
+# key1 = "value that conveys data to other systems"
+# system1-data = "value that is used by a system"
+# system2-data = "value that is used by another system"
+
 ## "required" lists a set of packages (not projects) that must be included in
 ## Gopkg.lock. This list is merged with the set of packages imported by the current
 ## project. Use it when your project needs a package it doesn't explicitly import -
@@ -12,9 +19,10 @@
 ## or in a dependency.
 # ignored = ["github.com/user/project/badpkg"]

-## Dependencies define constraints on dependent projects. They are respected by
+## Constraints are rules for how directly imported projects
+## may be incorporated into the depgraph. They are respected by
 ## dep whether coming from the Gopkg.toml of the current project or a dependency.
-# [[dependencies]]
+# [[constraint]]
 ## Required: the root import path of the project being constrained.
 # name = "github.com/user/project"
 #
@@ -26,18 +34,25 @@
 #
 ## Optional: an alternate location (URL or import path) for the project's source.
 # source = "https://github.com/myfork/package.git"
+#
+## "metadata" defines metadata about the dependency or override that could be used
+## by other independent systems. The metadata defined here will be ignored by dep.
+# [metadata]
+# key1 = "value that conveys data to other systems"
+# system1-data = "value that is used by a system"
+# system2-data = "value that is used by another system"

-## Overrides have the same structure as [[dependencies]], but supercede all
-## [[dependencies]] declarations from all projects. Only the current project's
-## [[overrides]] are applied.
+## Overrides have the same structure as [[constraint]], but supersede all
+## [[constraint]] declarations from all projects. Only [[override]] from
+## the current project are applied.
 ##
 ## Overrides are a sledgehammer. Use them only as a last resort.
-# [[overrides]]
+# [[override]]
 ## Required: the root import path of the project being constrained.
 # name = "github.com/user/project"
 #
 ## Optional: specifying a version constraint override will cause all other
-## constraints on this project to be ignored; only the overriden constraint
+## constraints on this project to be ignored; only the overridden constraint
 ## need be satisfied.
 ## Again, only one of "branch", "version" or "revision" can be specified.
 # version = "1.0.0"
@@ -51,6 +66,6 @@

-[[dependencies]]
+[[constraint]]
   name = "github.com/sdboyer/deptest"
-  version = "^1.0.0"
+  version = "1.0.0"
diff --git a/cmd/dep/testdata/harness_tests/ensure/override/case1/testcase.json b/cmd/dep/testdata/harness_tests/ensure/override/case1/testcase.json
index c66b07ff04..72e29594ac 100644
--- a/cmd/dep/testdata/harness_tests/ensure/override/case1/testcase.json
+++ b/cmd/dep/testdata/harness_tests/ensure/override/case1/testcase.json
@@ -1,8 +1,9 @@
 {
   "commands": [
-    ["init"],
-    ["ensure", "-override", "github.com/sdboyer/deptest@1.0.0"]
+    ["init", "-skip-tools"],
+    ["ensure", "-override", "github.com/sdboyer/deptest@=1.0.0"]
   ],
+  "error-expected": "",
   "vendor-final": [
     "github.com/sdboyer/deptest"
   ]
diff --git a/cmd/dep/testdata/harness_tests/ensure/package/case1/final/Gopkg.lock b/cmd/dep/testdata/harness_tests/ensure/package/case1/final/Gopkg.lock
new file mode 100644
index 0000000000..c7f497e7a1
--- /dev/null
+++ b/cmd/dep/testdata/harness_tests/ensure/package/case1/final/Gopkg.lock
@@ -0,0 +1,15 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+  name = "github.com/sdboyer/deptest"
+  packages = ["."]
+  revision = "ff2948a2ac8f538c4ecd55962e919d1e13e74baf"
+  version = "v1.0.0"
+
+[solve-meta]
+  analyzer-name = "dep"
+  analyzer-version = 1
+  inputs-digest = "14b07b05e0f01051b03887ab2bf80b516bc5510ea92f75f76c894b1745d8850c"
+  solver-name = "gps-cdcl"
+  solver-version = 1
diff --git a/cmd/dep/testdata/harness_tests/ensure/package/case1/final/Gopkg.toml b/cmd/dep/testdata/harness_tests/ensure/package/case1/final/Gopkg.toml
new file mode 100644
index 0000000000..bbd0a05ba9
--- /dev/null
+++ b/cmd/dep/testdata/harness_tests/ensure/package/case1/final/Gopkg.toml
@@ -0,0 +1,4 @@
+
+[[constraint]]
+  name = "github.com/sdboyer/deptest"
+  version = "0.8.0"
diff --git a/cmd/dep/testdata/harness_tests/remove/specific/case2/final/Gopkg.lock b/cmd/dep/testdata/harness_tests/ensure/package/case1/initial/Gopkg.lock
similarity index 57%
rename from cmd/dep/testdata/harness_tests/remove/specific/case2/final/Gopkg.lock
rename to cmd/dep/testdata/harness_tests/ensure/package/case1/initial/Gopkg.lock
index cb091e429d..7d21016835 100644
--- a/cmd/dep/testdata/harness_tests/remove/specific/case2/final/Gopkg.lock
+++ b/cmd/dep/testdata/harness_tests/ensure/package/case1/initial/Gopkg.lock
@@ -1,7 +1,7 @@
-memo = "38d8431865759ee3bf28fbdfc464f98ee8b56319394ec717df45e9969544cfca"
+memo = "88d2718cda70cce45158f953d2c6ead79c1db38e67e9704aff72be8fddb096e7"

 [[projects]]
   name = "github.com/sdboyer/deptest"
   packages = ["."]
   revision = "ff2948a2ac8f538c4ecd55962e919d1e13e74baf"
-  version = "v1.0.0"
+  version = "v0.8.0"
diff --git a/cmd/dep/testdata/harness_tests/ensure/package/case1/initial/Gopkg.toml b/cmd/dep/testdata/harness_tests/ensure/package/case1/initial/Gopkg.toml
new file mode 100644
index 0000000000..bbd0a05ba9
--- /dev/null
+++ b/cmd/dep/testdata/harness_tests/ensure/package/case1/initial/Gopkg.toml
@@ -0,0 +1,4 @@
+
+[[constraint]]
+  name = "github.com/sdboyer/deptest"
+  version = "0.8.0"
diff --git a/cmd/dep/testdata/harness_tests/ensure/package/case1/initial/main.go b/cmd/dep/testdata/harness_tests/ensure/package/case1/initial/main.go
new file mode 100644
index 0000000000..e23fcf34c5
--- /dev/null
+++ b/cmd/dep/testdata/harness_tests/ensure/package/case1/initial/main.go
@@ -0,0 +1,12 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	_ "github.com/sdboyer/deptest"
+)
+
+func main() {
+}
diff --git a/cmd/dep/testdata/harness_tests/ensure/package/case1/testcase.json b/cmd/dep/testdata/harness_tests/ensure/package/case1/testcase.json
new file mode 100644
index 0000000000..c14af6577e
--- /dev/null
+++ b/cmd/dep/testdata/harness_tests/ensure/package/case1/testcase.json
@@ -0,0 +1,13 @@
+{
+  "commands": [
+    ["ensure", "github.com/sdboyer/deptest@v1.0.0"]
+  ],
+  "error-expected": "",
+  "gopath-initial": {
+    "github.com/sdboyer/deptest": "v0.8.0",
+    "github.com/sdboyer/deptestdos": "a0196baa11ea047dd65037287451d36b861b00ea"
+  },
+  "vendor-final": [
+    "github.com/sdboyer/deptest"
+  ]
+}
diff --git a/cmd/dep/testdata/harness_tests/ensure/package/case2/final/Gopkg.lock b/cmd/dep/testdata/harness_tests/ensure/package/case2/final/Gopkg.lock
new file mode 100644
index 0000000000..47206963e8
--- /dev/null
+++ b/cmd/dep/testdata/harness_tests/ensure/package/case2/final/Gopkg.lock
@@ -0,0 +1,15 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+  name = "github.com/sdboyer/deptest"
+  packages = ["."]
+  revision = "ff2948a2ac8f538c4ecd55962e919d1e13e74baf"
+  version = "v0.8.0"
+
+[solve-meta]
+  analyzer-name = "dep"
+  analyzer-version = 1
+  inputs-digest = "b4e7a61f3a7fd8bbb1db94ed05b0d3675fb796d6058ff0a6fa95a47be0a171eb"
+  solver-name = "gps-cdcl"
+  solver-version = 1
diff --git a/cmd/dep/testdata/harness_tests/ensure/package/case2/final/Gopkg.toml b/cmd/dep/testdata/harness_tests/ensure/package/case2/final/Gopkg.toml
new file mode 100644
index 0000000000..bbd0a05ba9
--- /dev/null
+++ b/cmd/dep/testdata/harness_tests/ensure/package/case2/final/Gopkg.toml
@@ -0,0 +1,4 @@
+
+[[constraint]]
+  name = "github.com/sdboyer/deptest"
+  version = "0.8.0"
diff --git a/cmd/dep/testdata/harness_tests/remove/force/case1/final/Gopkg.lock b/cmd/dep/testdata/harness_tests/ensure/package/case2/initial/Gopkg.lock
similarity index 53%
rename from cmd/dep/testdata/harness_tests/remove/force/case1/final/Gopkg.lock
rename to cmd/dep/testdata/harness_tests/ensure/package/case2/initial/Gopkg.lock
index 03b152c335..7d21016835 100644
--- a/cmd/dep/testdata/harness_tests/remove/force/case1/final/Gopkg.lock
+++ b/cmd/dep/testdata/harness_tests/ensure/package/case2/initial/Gopkg.lock
@@ -4,10 +4,4 @@ memo = "88d2718cda70cce45158f953d2c6ead79c1db38e67e9704aff72be8fddb096e7"
   name = "github.com/sdboyer/deptest"
   packages = ["."]
   revision = "ff2948a2ac8f538c4ecd55962e919d1e13e74baf"
-  version = "v1.0.0"
-
-[[projects]]
-  name = "github.com/sdboyer/deptestdos"
-  packages = ["."]
-  revision = "5c607206be5decd28e6263ffffdcee067266015e"
-  version = "v2.0.0"
+  version = "v0.8.0"
diff --git a/cmd/dep/testdata/harness_tests/ensure/package/case2/initial/Gopkg.toml b/cmd/dep/testdata/harness_tests/ensure/package/case2/initial/Gopkg.toml
new file mode 100644
index 0000000000..bbd0a05ba9
--- /dev/null
+++ b/cmd/dep/testdata/harness_tests/ensure/package/case2/initial/Gopkg.toml
@@ -0,0 +1,4 @@
+
+[[constraint]]
+  name = "github.com/sdboyer/deptest"
+  version = "0.8.0"
diff --git a/cmd/dep/testdata/harness_tests/ensure/package/case2/initial/main.go b/cmd/dep/testdata/harness_tests/ensure/package/case2/initial/main.go
new file mode 100644
index 0000000000..e23fcf34c5
--- /dev/null
+++ b/cmd/dep/testdata/harness_tests/ensure/package/case2/initial/main.go
@@ -0,0 +1,12 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	_ "github.com/sdboyer/deptest"
+)
+
+func main() {
+}
diff --git a/cmd/dep/testdata/harness_tests/remove/specific/case2/testcase.json b/cmd/dep/testdata/harness_tests/ensure/package/case2/testcase.json
similarity index 69%
rename from cmd/dep/testdata/harness_tests/remove/specific/case2/testcase.json
rename to cmd/dep/testdata/harness_tests/ensure/package/case2/testcase.json
index 1a5f806b6c..e737374376 100644
--- a/cmd/dep/testdata/harness_tests/remove/specific/case2/testcase.json
+++ b/cmd/dep/testdata/harness_tests/ensure/package/case2/testcase.json
@@ -1,8 +1,9 @@
 {
   "commands": [
-    ["remove", "github.com/not/used"]
+    ["ensure", "github.com/sdboyer/deptest"]
   ],
-  "vendor-initial": {
+  "error-expected": "",
+  "gopath-initial": {
     "github.com/sdboyer/deptest": "v0.8.0",
     "github.com/sdboyer/deptestdos": "a0196baa11ea047dd65037287451d36b861b00ea"
   },
diff --git a/cmd/dep/testdata/harness_tests/ensure/pkg-errors/case1/final/Gopkg.lock b/cmd/dep/testdata/harness_tests/ensure/pkg-errors/case1/final/Gopkg.lock
index 70280d1ade..bef2d0092e 100644
--- a/cmd/dep/testdata/harness_tests/ensure/pkg-errors/case1/final/Gopkg.lock
+++ b/cmd/dep/testdata/harness_tests/ensure/pkg-errors/case1/final/Gopkg.lock
@@ -1 +1,9 @@
-memo = "ab4fef131ee828e96ba67d31a7d690bd5f2f42040c6766b1b12fe856f87e0ff7"
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[solve-meta]
+  analyzer-name = "dep"
+  analyzer-version = 1
+  inputs-digest = "ab4fef131ee828e96ba67d31a7d690bd5f2f42040c6766b1b12fe856f87e0ff7"
+  solver-name = "gps-cdcl"
+  solver-version = 1
diff --git a/cmd/dep/testdata/harness_tests/ensure/pkg-errors/case1/testcase.json b/cmd/dep/testdata/harness_tests/ensure/pkg-errors/case1/testcase.json
index d3635f8d62..2ccd4f9041 100644
--- a/cmd/dep/testdata/harness_tests/ensure/pkg-errors/case1/testcase.json
+++ b/cmd/dep/testdata/harness_tests/ensure/pkg-errors/case1/testcase.json
@@ -1,7 +1,8 @@
 {
-  "commands": [
-    ["init", "-no-examples"],
-    ["ensure", "-update"]
-  ],
-  "error-expected" : "all dirs lacked any go code"
- }
+  "commands": [
+    ["init", "-no-examples", "-skip-tools"],
+    ["ensure", "-update"]
+  ],
+  "error-expected": "all dirs lacked any go code",
+  "vendor-final": []
+}
diff --git a/cmd/dep/testdata/harness_tests/ensure/update/case1/final/Gopkg.lock b/cmd/dep/testdata/harness_tests/ensure/update/case1/final/Gopkg.lock
index b808a70fc4..1a7b1983f3 100644
--- a/cmd/dep/testdata/harness_tests/ensure/update/case1/final/Gopkg.lock
+++ b/cmd/dep/testdata/harness_tests/ensure/update/case1/final/Gopkg.lock
@@ -1,4 +1,5 @@
-memo = "1b381263a360eafafe3ef7f9be626672668d17250a3c9a8debd169d1b5e2eebb"
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
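A note on the testcase.json files touched throughout this changeset: they share a small schema, namely the commands to run, an "error-expected" string (now required even when empty, meaning the commands must succeed), an optional "gopath-initial" fixture map, and the expected "vendor-final" contents. A reader's sketch of decoding that schema in Go follows; the struct and field names mirror only what is visible in these fixtures, not the harness's actual types.

package main

import (
	"encoding/json"
	"fmt"
)

// testCase mirrors the keys seen in the testcase.json fixtures in this patch.
type testCase struct {
	Commands      [][]string        `json:"commands"`
	ErrorExpected string            `json:"error-expected"`
	GopathInitial map[string]string `json:"gopath-initial"`
	VendorFinal   []string          `json:"vendor-final"`
}

func main() {
	raw := []byte(`{
	  "commands": [["init", "-skip-tools"], ["ensure", "-update"]],
	  "error-expected": "",
	  "vendor-final": ["github.com/sdboyer/deptest"]
	}`)

	var tc testCase
	if err := json.Unmarshal(raw, &tc); err != nil {
		panic(err)
	}
	// An empty "error-expected" means the commands must succeed; a non-empty
	// value is matched against the error output of the final command.
	fmt.Printf("%d command(s), expect error %q\n", len(tc.Commands), tc.ErrorExpected)
}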
+
 [[projects]]
   name = "github.com/sdboyer/deptest"
@@ -11,3 +12,10 @@ memo = "1b381263a360eafafe3ef7f9be626672668d17250a3c9a8debd169d1b5e2eebb"
   packages = ["."]
   revision = "5c607206be5decd28e6263ffffdcee067266015e"
   version = "v2.0.0"
+
+[solve-meta]
+  analyzer-name = "dep"
+  analyzer-version = 1
+  inputs-digest = "1b381263a360eafafe3ef7f9be626672668d17250a3c9a8debd169d1b5e2eebb"
+  solver-name = "gps-cdcl"
+  solver-version = 1
diff --git a/cmd/dep/testdata/harness_tests/ensure/update/case1/final/Gopkg.toml b/cmd/dep/testdata/harness_tests/ensure/update/case1/final/Gopkg.toml
index d327c51ade..532da96a0d 100644
--- a/cmd/dep/testdata/harness_tests/ensure/update/case1/final/Gopkg.toml
+++ b/cmd/dep/testdata/harness_tests/ensure/update/case1/final/Gopkg.toml
@@ -1,3 +1,3 @@
-[[dependencies]]
+[[constraint]]
   name = "github.com/sdboyer/deptest"
   version = "~0.8.0"
\ No newline at end of file
diff --git a/cmd/dep/testdata/harness_tests/ensure/update/case1/initial/Gopkg.toml b/cmd/dep/testdata/harness_tests/ensure/update/case1/initial/Gopkg.toml
index d327c51ade..532da96a0d 100644
--- a/cmd/dep/testdata/harness_tests/ensure/update/case1/initial/Gopkg.toml
+++ b/cmd/dep/testdata/harness_tests/ensure/update/case1/initial/Gopkg.toml
@@ -1,3 +1,3 @@
-[[dependencies]]
+[[constraint]]
   name = "github.com/sdboyer/deptest"
   version = "~0.8.0"
\ No newline at end of file
diff --git a/cmd/dep/testdata/harness_tests/ensure/update/case1/testcase.json b/cmd/dep/testdata/harness_tests/ensure/update/case1/testcase.json
index cb357104b5..07bc635269 100644
--- a/cmd/dep/testdata/harness_tests/ensure/update/case1/testcase.json
+++ b/cmd/dep/testdata/harness_tests/ensure/update/case1/testcase.json
@@ -2,6 +2,7 @@
   "commands": [
     ["ensure", "-update", "github.com/sdboyer/deptest"]
   ],
+  "error-expected": "",
   "vendor-final": [
     "github.com/sdboyer/deptest",
     "github.com/sdboyer/deptestdos"
diff --git a/cmd/dep/testdata/harness_tests/ensure/update/case2/final/Gopkg.lock b/cmd/dep/testdata/harness_tests/ensure/update/case2/final/Gopkg.lock
index 362b3ce3c1..4068ebbd34 100644
--- a/cmd/dep/testdata/harness_tests/ensure/update/case2/final/Gopkg.lock
+++ b/cmd/dep/testdata/harness_tests/ensure/update/case2/final/Gopkg.lock
@@ -1,3 +1,5 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
 memo = "9a5243dd3fa20feeaa20398e7283d6c566532e2af1aae279a010df34793761c5"

 [[projects]]
diff --git a/cmd/dep/testdata/harness_tests/ensure/update/case2/final/Gopkg.toml b/cmd/dep/testdata/harness_tests/ensure/update/case2/final/Gopkg.toml
index d327c51ade..532da96a0d 100644
--- a/cmd/dep/testdata/harness_tests/ensure/update/case2/final/Gopkg.toml
+++ b/cmd/dep/testdata/harness_tests/ensure/update/case2/final/Gopkg.toml
@@ -1,3 +1,3 @@
-[[dependencies]]
+[[constraint]]
   name = "github.com/sdboyer/deptest"
   version = "~0.8.0"
\ No newline at end of file
diff --git a/cmd/dep/testdata/harness_tests/ensure/update/case2/initial/Gopkg.lock b/cmd/dep/testdata/harness_tests/ensure/update/case2/initial/Gopkg.lock
index 362b3ce3c1..4068ebbd34 100644
--- a/cmd/dep/testdata/harness_tests/ensure/update/case2/initial/Gopkg.lock
+++ b/cmd/dep/testdata/harness_tests/ensure/update/case2/initial/Gopkg.lock
@@ -1,3 +1,5 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
 memo = "9a5243dd3fa20feeaa20398e7283d6c566532e2af1aae279a010df34793761c5"

 [[projects]]
diff --git a/cmd/dep/testdata/harness_tests/ensure/update/case2/initial/Gopkg.toml b/cmd/dep/testdata/harness_tests/ensure/update/case2/initial/Gopkg.toml
index d327c51ade..532da96a0d 100644
--- a/cmd/dep/testdata/harness_tests/ensure/update/case2/initial/Gopkg.toml
+++ b/cmd/dep/testdata/harness_tests/ensure/update/case2/initial/Gopkg.toml
@@ -1,3 +1,3 @@
-[[dependencies]]
+[[constraint]]
   name = "github.com/sdboyer/deptest"
   version = "~0.8.0"
\ No newline at end of file
diff --git a/cmd/dep/testdata/harness_tests/ensure/update/case2/testcase.json b/cmd/dep/testdata/harness_tests/ensure/update/case2/testcase.json
index aa86ad32f3..f0ac6111e5 100644
--- a/cmd/dep/testdata/harness_tests/ensure/update/case2/testcase.json
+++ b/cmd/dep/testdata/harness_tests/ensure/update/case2/testcase.json
@@ -2,5 +2,6 @@
   "commands": [
     ["ensure", "-n", "-update", "github.com/sdboyer/deptest"]
   ],
+  "error-expected": "",
   "vendor-final": []
 }
diff --git a/cmd/dep/testdata/harness_tests/init/case1/final/Gopkg.lock b/cmd/dep/testdata/harness_tests/init/case1/final/Gopkg.lock
index d43aa9f9ac..15b4e08bac 100644
--- a/cmd/dep/testdata/harness_tests/init/case1/final/Gopkg.lock
+++ b/cmd/dep/testdata/harness_tests/init/case1/final/Gopkg.lock
@@ -1,4 +1,5 @@
-memo = "88d2718cda70cce45158f953d2c6ead79c1db38e67e9704aff72be8fddb096e7"
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
 [[projects]]
   name = "github.com/sdboyer/deptest"
@@ -10,3 +11,10 @@ memo = "88d2718cda70cce45158f953d2c6ead79c1db38e67e9704aff72be8fddb096e7"
   name = "github.com/sdboyer/deptestdos"
   packages = ["."]
   revision = "a0196baa11ea047dd65037287451d36b861b00ea"
+
+[solve-meta]
+  analyzer-name = "dep"
+  analyzer-version = 1
+  inputs-digest = "1b381263a360eafafe3ef7f9be626672668d17250a3c9a8debd169d1b5e2eebb"
+  solver-name = "gps-cdcl"
+  solver-version = 1
diff --git a/cmd/dep/testdata/harness_tests/init/case1/final/Gopkg.toml b/cmd/dep/testdata/harness_tests/init/case1/final/Gopkg.toml
index 0681d4cf62..bbd0a05ba9 100644
--- a/cmd/dep/testdata/harness_tests/init/case1/final/Gopkg.toml
+++ b/cmd/dep/testdata/harness_tests/init/case1/final/Gopkg.toml
@@ -1,7 +1,4 @@
-[[dependencies]]
+[[constraint]]
   name = "github.com/sdboyer/deptest"
-  version = ">=0.8.0, <1.0.0"
-
-[[dependencies]]
-  name = "github.com/sdboyer/deptestdos"
+  version = "0.8.0"
diff --git a/cmd/dep/testdata/harness_tests/init/case1/testcase.json b/cmd/dep/testdata/harness_tests/init/case1/testcase.json
index ea0ff80424..d2403484b7 100644
--- a/cmd/dep/testdata/harness_tests/init/case1/testcase.json
+++ b/cmd/dep/testdata/harness_tests/init/case1/testcase.json
@@ -1,7 +1,8 @@
 {
   "commands": [
-    ["init", "-no-examples"]
+    ["init", "-no-examples", "-skip-tools"]
   ],
+  "error-expected": "",
   "gopath-initial": {
     "github.com/sdboyer/deptest": "v0.8.0",
     "github.com/sdboyer/deptestdos": "a0196baa11ea047dd65037287451d36b861b00ea"
diff --git a/cmd/dep/testdata/harness_tests/init/case2/final/Gopkg.lock b/cmd/dep/testdata/harness_tests/init/case2/final/Gopkg.lock
index 50ed86f68e..608d5a8d97 100644
--- a/cmd/dep/testdata/harness_tests/init/case2/final/Gopkg.lock
+++ b/cmd/dep/testdata/harness_tests/init/case2/final/Gopkg.lock
@@ -1,4 +1,5 @@
-memo = "b4fe6e8bceac924197838b6ea47989abbdd3a8d31035d20ee0a1dabc0994c368"
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
 [[projects]]
   name = "github.com/sdboyer/deptest"
@@ -11,3 +12,10 @@ memo = "b4fe6e8bceac924197838b6ea47989abbdd3a8d31035d20ee0a1dabc0994c368"
   packages = ["."]
   revision = "5c607206be5decd28e6263ffffdcee067266015e"
   version = "v2.0.0"
+
+[solve-meta]
+  analyzer-name = "dep"
+  analyzer-version = 1
+  inputs-digest = "ced51326ad990b11098d8076d0f7d72d89eee1ba6e8dacc7bc73be05cddac438"
+  solver-name = "gps-cdcl"
+  solver-version = 1
diff --git a/cmd/dep/testdata/harness_tests/init/case2/final/Gopkg.toml b/cmd/dep/testdata/harness_tests/init/case2/final/Gopkg.toml
index 487298ca78..bea60bf6e6 100644
--- a/cmd/dep/testdata/harness_tests/init/case2/final/Gopkg.toml
+++ b/cmd/dep/testdata/harness_tests/init/case2/final/Gopkg.toml
@@ -1,8 +1,8 @@
-[[dependencies]]
+[[constraint]]
   name = "github.com/sdboyer/deptest"
-  version = ">=0.8.0, <1.0.0"
+  version = "0.8.0"

-[[dependencies]]
+[[constraint]]
   name = "github.com/sdboyer/deptestdos"
-  version = "^2.0.0"
+  version = "2.0.0"
diff --git a/cmd/dep/testdata/harness_tests/init/case2/testcase.json b/cmd/dep/testdata/harness_tests/init/case2/testcase.json
index 3f3140bc04..9167534f7b 100644
--- a/cmd/dep/testdata/harness_tests/init/case2/testcase.json
+++ b/cmd/dep/testdata/harness_tests/init/case2/testcase.json
@@ -1,7 +1,8 @@
 {
   "commands": [
-    ["init", "-no-examples"]
+    ["init", "-no-examples", "-skip-tools"]
   ],
+  "error-expected": "",
   "gopath-initial": {
     "github.com/sdboyer/deptest": "v0.8.0"
   },
diff --git a/cmd/dep/testdata/harness_tests/init/case3/final/Gopkg.lock b/cmd/dep/testdata/harness_tests/init/case3/final/Gopkg.lock
index 465f59501e..c4f18284da 100644
--- a/cmd/dep/testdata/harness_tests/init/case3/final/Gopkg.lock
+++ b/cmd/dep/testdata/harness_tests/init/case3/final/Gopkg.lock
@@ -1,4 +1,5 @@
-memo = "af9a783a5430dabcaaf44683c09e2b729e1c0d61f13bfdf6677c4fd0b41387ca"
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
 [[projects]]
   branch = "master"
@@ -10,3 +11,10 @@ memo = "af9a783a5430dabcaaf44683c09e2b729e1c0d61f13bfdf6677c4fd0b41387ca"
   name = "github.com/sdboyer/deptestdos"
   packages = ["."]
   revision = "a0196baa11ea047dd65037287451d36b861b00ea"
+
+[solve-meta]
+  analyzer-name = "dep"
+  analyzer-version = 1
+  inputs-digest = "af9a783a5430dabcaaf44683c09e2b729e1c0d61f13bfdf6677c4fd0b41387ca"
+  solver-name = "gps-cdcl"
+  solver-version = 1
diff --git a/cmd/dep/testdata/harness_tests/init/case3/final/Gopkg.toml b/cmd/dep/testdata/harness_tests/init/case3/final/Gopkg.toml
index bccfadb86a..a90dd2dadd 100644
--- a/cmd/dep/testdata/harness_tests/init/case3/final/Gopkg.toml
+++ b/cmd/dep/testdata/harness_tests/init/case3/final/Gopkg.toml
@@ -1,7 +1,4 @@
-[[dependencies]]
+[[constraint]]
   branch = "master"
   name = "github.com/sdboyer/deptest"
-
-[[dependencies]]
-  name = "github.com/sdboyer/deptestdos"
diff --git a/cmd/dep/testdata/harness_tests/init/case3/testcase.json b/cmd/dep/testdata/harness_tests/init/case3/testcase.json
index 13cba35aeb..d50e7d91cb 100644
--- a/cmd/dep/testdata/harness_tests/init/case3/testcase.json
+++ b/cmd/dep/testdata/harness_tests/init/case3/testcase.json
@@ -1,7 +1,8 @@
 {
   "commands": [
-    ["init", "-no-examples"]
+    ["init", "-no-examples", "-skip-tools"]
   ],
+  "error-expected": "",
   "gopath-initial": {
     "github.com/sdboyer/deptestdos": "a0196baa11ea047dd65037287451d36b861b00ea"
   },
diff --git a/cmd/dep/testdata/harness_tests/init/glide/case1/README.md b/cmd/dep/testdata/harness_tests/init/glide/case1/README.md
new file mode 100644
index 0000000000..899680ac00
--- /dev/null
+++ b/cmd/dep/testdata/harness_tests/init/glide/case1/README.md
@@ -0,0 +1 @@
+Import glide config at project root.
\ No newline at end of file
diff --git a/cmd/dep/testdata/harness_tests/init/glide/case1/final/Gopkg.lock b/cmd/dep/testdata/harness_tests/init/glide/case1/final/Gopkg.lock
new file mode 100644
index 0000000000..56f9bb2b7a
--- /dev/null
+++ b/cmd/dep/testdata/harness_tests/init/glide/case1/final/Gopkg.lock
@@ -0,0 +1,21 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+  name = "github.com/sdboyer/deptest"
+  packages = ["."]
+  revision = "ff2948a2ac8f538c4ecd55962e919d1e13e74baf"
+  version = "v1.0.0"
+
+[[projects]]
+  name = "github.com/sdboyer/deptestdos"
+  packages = ["."]
+  revision = "5c607206be5decd28e6263ffffdcee067266015e"
+  version = "v2.0.0"
+
+[solve-meta]
+  analyzer-name = "dep"
+  analyzer-version = 1
+  inputs-digest = "c53803413bd0160505cce903e1cba743e0b964088f8cc42a6123f6fe1a0ae9d3"
+  solver-name = "gps-cdcl"
+  solver-version = 1
diff --git a/cmd/dep/testdata/harness_tests/init/glide/case1/final/Gopkg.toml b/cmd/dep/testdata/harness_tests/init/glide/case1/final/Gopkg.toml
new file mode 100644
index 0000000000..ab7893d76e
--- /dev/null
+++ b/cmd/dep/testdata/harness_tests/init/glide/case1/final/Gopkg.toml
@@ -0,0 +1,5 @@
+ignored = ["github.com/sdboyer/dep-test","github.com/golang/notexist/samples"]
+
+[[constraint]]
+  name = "github.com/sdboyer/deptestdos"
+  version = "2.0.0"
diff --git a/cmd/dep/testdata/harness_tests/init/glide/case1/initial/glide.lock b/cmd/dep/testdata/harness_tests/init/glide/case1/initial/glide.lock
new file mode 100644
index 0000000000..1d29509927
--- /dev/null
+++ b/cmd/dep/testdata/harness_tests/init/glide/case1/initial/glide.lock
@@ -0,0 +1,12 @@
+hash: 16053c82a71f9bd509b05a4523df6bc418aed2083e4b8bd97a870bbc003256f8
+updated: 2017-03-07T17:02:32.214383898-06:00
+imports:
+- name: github.com/sdboyer/deptest
+  repo: https://github.com/sdboyer/deptest.git
+  vcs: git
+  version: ff2948a2ac8f538c4ecd55962e919d1e13e74baf
+- name: github.com/sdboyer/deptestdos
+  version: 5c607206be5decd28e6263ffffdcee067266015e
+testImports:
+- name: github.com/golang/lint
+  version: cb00e5669539f047b2f4c53a421a01b0c8e172c6
diff --git a/cmd/dep/testdata/harness_tests/init/glide/case1/initial/glide.yaml b/cmd/dep/testdata/harness_tests/init/glide/case1/initial/glide.yaml
new file mode 100644
index 0000000000..ee269a398d
--- /dev/null
+++ b/cmd/dep/testdata/harness_tests/init/glide/case1/initial/glide.yaml
@@ -0,0 +1,20 @@
+package: github.com/golang/notexist
+homepage: http://example.com
+license: MIT
+owners:
+- name: Sam Boyer
+  email: sdboyer@example.com
+  homepage: http://sdboyer.io
+ignore:
+- github.com/sdboyer/dep-test
+excludeDirs:
+- samples
+import:
+- package: github.com/sdboyer/deptest # This is a transitive dep and will be ignored
+  repo: https://github.com/sdboyer/deptest.git
+  vcs: git
+  version: v1.0.0
+- package: github.com/sdboyer/deptestdos
+  version: v2.0.0
+testImport:
+- package: github.com/golang/lint
diff --git a/cmd/dep/testdata/harness_tests/remove/specific/case1/initial/main.go b/cmd/dep/testdata/harness_tests/init/glide/case1/initial/main.go
similarity index 51%
rename from cmd/dep/testdata/harness_tests/remove/specific/case1/initial/main.go
rename to cmd/dep/testdata/harness_tests/init/glide/case1/initial/main.go
index 2eae5b511d..2b2c7c396e 100644
--- a/cmd/dep/testdata/harness_tests/remove/specific/case1/initial/main.go
+++ b/cmd/dep/testdata/harness_tests/init/glide/case1/initial/main.go
@@ -1,18 +1,16 @@
-// Copyright 2016 The Go Authors. All rights reserved.
+// Copyright 2017 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.

 package main

 import (
-	"github.com/sdboyer/deptest"
+	"fmt"
+	"github.com/sdboyer/deptestdos"
 )

 func main() {
-	err := nil
-	if err != nil {
-		deptest.Map["yo yo!"]
-	}
-	deptestdos.diMeLo("whatev")
+	var x deptestdos.Bar
+	fmt.Println(x)
 }
diff --git a/cmd/dep/testdata/harness_tests/remove/specific/case2/initial/main.go b/cmd/dep/testdata/harness_tests/init/glide/case1/initial/samples/samples.go
similarity index 56%
rename from cmd/dep/testdata/harness_tests/remove/specific/case2/initial/main.go
rename to cmd/dep/testdata/harness_tests/init/glide/case1/initial/samples/samples.go
index 71416f1751..3e160f22fe 100644
--- a/cmd/dep/testdata/harness_tests/remove/specific/case2/initial/main.go
+++ b/cmd/dep/testdata/harness_tests/init/glide/case1/initial/samples/samples.go
@@ -2,15 +2,11 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.

-package main
+package samples

-import (
-	"github.com/sdboyer/deptest"
-)
+import dt "github.com/carolynvs/go-dep-test"

-func main() {
-	err := nil
-	if err != nil {
-		deptest.Map["yo yo!"]
-	}
+func Sample1() int {
+	var x = dt.Thing
+	return x
 }
diff --git a/cmd/dep/testdata/harness_tests/init/glide/case1/testcase.json b/cmd/dep/testdata/harness_tests/init/glide/case1/testcase.json
new file mode 100644
index 0000000000..85a05ed658
--- /dev/null
+++ b/cmd/dep/testdata/harness_tests/init/glide/case1/testcase.json
@@ -0,0 +1,14 @@
+{
+  "commands": [
+    ["init", "-no-examples"]
+  ],
+  "error-expected": "",
+  "gopath-initial": {
+    "github.com/sdboyer/deptest": "3f4c3bea144e112a69bbe5d8d01c1b09a544253f",
+    "github.com/sdboyer/deptestdos": "5c607206be5decd28e6263ffffdcee067266015e"
+  },
+  "vendor-final": [
+    "github.com/sdboyer/deptest",
+    "github.com/sdboyer/deptestdos"
+  ]
+}
diff --git a/cmd/dep/testdata/harness_tests/init/glide/case2/README.md b/cmd/dep/testdata/harness_tests/init/glide/case2/README.md
new file mode 100644
index 0000000000..f6e3eaf4fa
--- /dev/null
+++ b/cmd/dep/testdata/harness_tests/init/glide/case2/README.md
@@ -0,0 +1 @@
+Ignore glide config at root when -skip-tools is specified.
\ No newline at end of file
diff --git a/cmd/dep/testdata/harness_tests/init/glide/case2/final/Gopkg.lock b/cmd/dep/testdata/harness_tests/init/glide/case2/final/Gopkg.lock
new file mode 100644
index 0000000000..ac445c05d2
--- /dev/null
+++ b/cmd/dep/testdata/harness_tests/init/glide/case2/final/Gopkg.lock
@@ -0,0 +1,21 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+  name = "github.com/sdboyer/deptest"
+  packages = ["."]
+  revision = "3f4c3bea144e112a69bbe5d8d01c1b09a544253f"
+  version = "v0.8.1"
+
+[[projects]]
+  name = "github.com/sdboyer/deptestdos"
+  packages = ["."]
+  revision = "5c607206be5decd28e6263ffffdcee067266015e"
+  version = "v2.0.0"
+
+[solve-meta]
+  analyzer-name = "dep"
+  analyzer-version = 1
+  inputs-digest = "1ed417a0bec57ffe988fae1cba8f3d49994fb893394d61844e0b3c96d69573fe"
+  solver-name = "gps-cdcl"
+  solver-version = 1
diff --git a/cmd/dep/testdata/harness_tests/init/glide/case2/final/Gopkg.toml b/cmd/dep/testdata/harness_tests/init/glide/case2/final/Gopkg.toml
new file mode 100644
index 0000000000..aaf78303fa
--- /dev/null
+++ b/cmd/dep/testdata/harness_tests/init/glide/case2/final/Gopkg.toml
@@ -0,0 +1,4 @@
+
+[[constraint]]
+  name = "github.com/sdboyer/deptestdos"
+  version = "2.0.0"
diff --git a/cmd/dep/testdata/harness_tests/init/glide/case2/initial/glide.lock b/cmd/dep/testdata/harness_tests/init/glide/case2/initial/glide.lock
new file mode 100644
index 0000000000..1d29509927
--- /dev/null
+++ b/cmd/dep/testdata/harness_tests/init/glide/case2/initial/glide.lock
@@ -0,0 +1,12 @@
+hash: 16053c82a71f9bd509b05a4523df6bc418aed2083e4b8bd97a870bbc003256f8
+updated: 2017-03-07T17:02:32.214383898-06:00
+imports:
+- name: github.com/sdboyer/deptest
+  repo: https://github.com/sdboyer/deptest.git
+  vcs: git
+  version: ff2948a2ac8f538c4ecd55962e919d1e13e74baf
+- name: github.com/sdboyer/deptestdos
+  version: 5c607206be5decd28e6263ffffdcee067266015e
+testImports:
+- name: github.com/golang/lint
+  version: cb00e5669539f047b2f4c53a421a01b0c8e172c6
diff --git a/cmd/dep/testdata/harness_tests/init/glide/case2/initial/glide.yaml b/cmd/dep/testdata/harness_tests/init/glide/case2/initial/glide.yaml
new file mode 100644
index 0000000000..ee269a398d
--- /dev/null
+++ b/cmd/dep/testdata/harness_tests/init/glide/case2/initial/glide.yaml
@@ -0,0 +1,20 @@
+package: github.com/golang/notexist
+homepage: http://example.com
+license: MIT
+owners:
+- name: Sam Boyer
+  email: sdboyer@example.com
+  homepage: http://sdboyer.io
+ignore:
+- github.com/sdboyer/dep-test
+excludeDirs:
+- samples
+import:
+- package: github.com/sdboyer/deptest # This is a transitive dep and will be ignored
+  repo: https://github.com/sdboyer/deptest.git
+  vcs: git
+  version: v1.0.0
+- package: github.com/sdboyer/deptestdos
+  version: v2.0.0
+testImport:
+- package: github.com/golang/lint
diff --git a/cmd/dep/testdata/harness_tests/remove/force/case1/initial/main.go b/cmd/dep/testdata/harness_tests/init/glide/case2/initial/main.go
similarity index 51%
rename from cmd/dep/testdata/harness_tests/remove/force/case1/initial/main.go
rename to cmd/dep/testdata/harness_tests/init/glide/case2/initial/main.go
index 2eae5b511d..2b2c7c396e 100644
--- a/cmd/dep/testdata/harness_tests/remove/force/case1/initial/main.go
+++ b/cmd/dep/testdata/harness_tests/init/glide/case2/initial/main.go
@@ -1,18 +1,16 @@
-// Copyright 2016 The Go Authors. All rights reserved.
+// Copyright 2017 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.

 package main

 import (
-	"github.com/sdboyer/deptest"
+	"fmt"
+	"github.com/sdboyer/deptestdos"
 )

 func main() {
-	err := nil
-	if err != nil {
-		deptest.Map["yo yo!"]
-	}
-	deptestdos.diMeLo("whatev")
+	var x deptestdos.Bar
+	fmt.Println(x)
 }
diff --git a/cmd/dep/testdata/harness_tests/init/glide/case2/testcase.json b/cmd/dep/testdata/harness_tests/init/glide/case2/testcase.json
new file mode 100644
index 0000000000..5c387ed5f7
--- /dev/null
+++ b/cmd/dep/testdata/harness_tests/init/glide/case2/testcase.json
@@ -0,0 +1,14 @@
+{
+  "commands": [
+    ["init", "-no-examples", "-skip-tools"]
+  ],
+  "error-expected": "",
+  "gopath-initial": {
+    "github.com/sdboyer/deptest": "3f4c3bea144e112a69bbe5d8d01c1b09a544253f",
+    "github.com/sdboyer/deptestdos": "5c607206be5decd28e6263ffffdcee067266015e"
+  },
+  "vendor-final": [
+    "github.com/sdboyer/deptest",
+    "github.com/sdboyer/deptestdos"
+  ]
+}
diff --git a/cmd/dep/testdata/harness_tests/init/glide/case3/README.md b/cmd/dep/testdata/harness_tests/init/glide/case3/README.md
new file mode 100644
index 0000000000..f46ccd1949
--- /dev/null
+++ b/cmd/dep/testdata/harness_tests/init/glide/case3/README.md
@@ -0,0 +1 @@
+Import glide config in dependencies.
\ No newline at end of file
diff --git a/cmd/dep/testdata/harness_tests/init/glide/case3/final/Gopkg.lock b/cmd/dep/testdata/harness_tests/init/glide/case3/final/Gopkg.lock
new file mode 100644
index 0000000000..440c282602
--- /dev/null
+++ b/cmd/dep/testdata/harness_tests/init/glide/case3/final/Gopkg.lock
@@ -0,0 +1,21 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+  name = "github.com/carolynvs/deptestglide"
+  packages = ["."]
+  revision = "aa7fea6e17ca281c6f210afb93fc3c98ef29a695"
+  version = "v0.1.1"
+
+[[projects]]
+  name = "github.com/sdboyer/deptest"
+  packages = ["."]
+  revision = "3f4c3bea144e112a69bbe5d8d01c1b09a544253f"
+  version = "v0.8.1"
+
+[solve-meta]
+  analyzer-name = "dep+import"
+  analyzer-version = 1
+  inputs-digest = "07eddb7ff09071bde95b019911e1fb30d91bddabbb23f4c797c6ce61b58cd7be"
+  solver-name = "gps-cdcl"
+  solver-version = 1
diff --git a/cmd/dep/testdata/harness_tests/init/glide/case3/final/Gopkg.toml b/cmd/dep/testdata/harness_tests/init/glide/case3/final/Gopkg.toml
new file mode 100644
index 0000000000..9b31cba7ce
--- /dev/null
+++ b/cmd/dep/testdata/harness_tests/init/glide/case3/final/Gopkg.toml
@@ -0,0 +1,4 @@
+
+[[constraint]]
+  name = "github.com/carolynvs/deptestglide"
+  version = "0.1.1"
diff --git a/cmd/dep/testdata/harness_tests/init/glide/case3/initial/main.go b/cmd/dep/testdata/harness_tests/init/glide/case3/initial/main.go
new file mode 100644
index 0000000000..3e509998f7
--- /dev/null
+++ b/cmd/dep/testdata/harness_tests/init/glide/case3/initial/main.go
@@ -0,0 +1,16 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
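A note on the glide fixtures above: each pairs a glide.yaml with the Gopkg.toml dep is expected to derive from it, and case1's final Gopkg.toml shows both the glide "ignore" entries and "excludeDirs" (joined onto the project root) landing in dep's "ignored" list. A sketch of extracting those fields with gopkg.in/yaml.v2 follows; the struct and the joining logic are the reader's reconstruction, since dep's actual glide importer is not part of this excerpt.

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

// glideYaml captures just the fields these fixtures exercise.
type glideYaml struct {
	Package     string   `yaml:"package"`
	Ignore      []string `yaml:"ignore"`
	ExcludeDirs []string `yaml:"excludeDirs"`
}

func main() {
	data := []byte(`
package: github.com/golang/notexist
ignore:
- github.com/sdboyer/dep-test
excludeDirs:
- samples
`)
	var g glideYaml
	if err := yaml.Unmarshal(data, &g); err != nil {
		panic(err)
	}
	// Mirror what case1's final Gopkg.toml shows: ignore entries pass through
	// as-is, while excluded dirs are qualified by the root import path.
	ignored := append([]string{}, g.Ignore...)
	for _, d := range g.ExcludeDirs {
		ignored = append(ignored, g.Package+"/"+d)
	}
	fmt.Println(ignored) // [github.com/sdboyer/dep-test github.com/golang/notexist/samples]
}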
+ +package main + +import ( + "fmt" + + "github.com/carolynvs/deptestglide" +) + +func main() { + var x = deptestglide.MyFoo + fmt.Println(x) +} diff --git a/cmd/dep/testdata/harness_tests/init/glide/case3/testcase.json.ignore b/cmd/dep/testdata/harness_tests/init/glide/case3/testcase.json.ignore new file mode 100644 index 0000000000..e3e5156036 --- /dev/null +++ b/cmd/dep/testdata/harness_tests/init/glide/case3/testcase.json.ignore @@ -0,0 +1,11 @@ +{ + "commands": [ + ["init", "-no-examples"] + ], + "error-expected": "", + "gopath-initial": {}, + "vendor-final": [ + "github.com/carolynvs/deptestglide", + "github.com/sdboyer/deptest" + ] +} diff --git a/cmd/dep/testdata/harness_tests/init/manifest-exists/testcase.json b/cmd/dep/testdata/harness_tests/init/manifest-exists/testcase.json index 08322d2a49..3958059fbb 100644 --- a/cmd/dep/testdata/harness_tests/init/manifest-exists/testcase.json +++ b/cmd/dep/testdata/harness_tests/init/manifest-exists/testcase.json @@ -1,6 +1,7 @@ { - "commands": [ - ["init"] - ], - "error-expected" : "manifest already exists:" - } \ No newline at end of file + "commands": [ + ["init"] + ], + "error-expected": "manifest already exists:", + "vendor-final": [] +} diff --git a/cmd/dep/testdata/harness_tests/init/skip-hidden/final/Gopkg.lock b/cmd/dep/testdata/harness_tests/init/skip-hidden/final/Gopkg.lock index 2b9c20975d..c7f497e7a1 100644 --- a/cmd/dep/testdata/harness_tests/init/skip-hidden/final/Gopkg.lock +++ b/cmd/dep/testdata/harness_tests/init/skip-hidden/final/Gopkg.lock @@ -1,7 +1,15 @@ -memo = "14b07b05e0f01051b03887ab2bf80b516bc5510ea92f75f76c894b1745d8850c" +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + [[projects]] name = "github.com/sdboyer/deptest" packages = ["."] revision = "ff2948a2ac8f538c4ecd55962e919d1e13e74baf" version = "v1.0.0" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "14b07b05e0f01051b03887ab2bf80b516bc5510ea92f75f76c894b1745d8850c" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/cmd/dep/testdata/harness_tests/init/skip-hidden/final/Gopkg.toml b/cmd/dep/testdata/harness_tests/init/skip-hidden/final/Gopkg.toml index d5f3e3c9d6..e242e02114 100644 --- a/cmd/dep/testdata/harness_tests/init/skip-hidden/final/Gopkg.toml +++ b/cmd/dep/testdata/harness_tests/init/skip-hidden/final/Gopkg.toml @@ -1,4 +1,4 @@ -[[dependencies]] +[[constraint]] name = "github.com/sdboyer/deptest" - version = "^1.0.0" + version = "1.0.0" diff --git a/cmd/dep/testdata/harness_tests/init/skip-hidden/testcase.json b/cmd/dep/testdata/harness_tests/init/skip-hidden/testcase.json index 283604258a..0283a8839f 100644 --- a/cmd/dep/testdata/harness_tests/init/skip-hidden/testcase.json +++ b/cmd/dep/testdata/harness_tests/init/skip-hidden/testcase.json @@ -1,7 +1,8 @@ { "commands": [ - ["init", "-no-examples"] + ["init", "-no-examples", "-skip-tools"] ], + "error-expected": "", "vendor-final": [ "github.com/sdboyer/deptest" ] diff --git a/cmd/dep/testdata/harness_tests/init/usage/with_h_flag/testcase.json b/cmd/dep/testdata/harness_tests/init/usage/with_h_flag/testcase.json new file mode 100644 index 0000000000..b954a4827a --- /dev/null +++ b/cmd/dep/testdata/harness_tests/init/usage/with_h_flag/testcase.json @@ -0,0 +1,6 @@ +{ + "commands": [ + ["init", "-h"] + ], + "error-expected": "Usage: dep init [root]" +} diff --git a/cmd/dep/testdata/harness_tests/init/usage/with_not_defined_flag/testcase.json 
b/cmd/dep/testdata/harness_tests/init/usage/with_not_defined_flag/testcase.json new file mode 100644 index 0000000000..3d3591e971 --- /dev/null +++ b/cmd/dep/testdata/harness_tests/init/usage/with_not_defined_flag/testcase.json @@ -0,0 +1,6 @@ +{ + "commands": [ + ["init", "-not-defined-flag"] + ], + "error-expected": "flag provided but not defined: -not-defined-flag\nUsage: dep init [root]" +} diff --git a/cmd/dep/testdata/harness_tests/prune/without_lock/final/Gopkg.toml b/cmd/dep/testdata/harness_tests/prune/without_lock/final/Gopkg.toml new file mode 100644 index 0000000000..94deb714a4 --- /dev/null +++ b/cmd/dep/testdata/harness_tests/prune/without_lock/final/Gopkg.toml @@ -0,0 +1,3 @@ +[[constraint]] + name = "github.com/sdboyer/deptest" + version = "^0.8.0" diff --git a/cmd/dep/testdata/harness_tests/prune/without_lock/initial/Gopkg.toml b/cmd/dep/testdata/harness_tests/prune/without_lock/initial/Gopkg.toml new file mode 100644 index 0000000000..94deb714a4 --- /dev/null +++ b/cmd/dep/testdata/harness_tests/prune/without_lock/initial/Gopkg.toml @@ -0,0 +1,3 @@ +[[constraint]] + name = "github.com/sdboyer/deptest" + version = "^0.8.0" diff --git a/cmd/dep/testdata/harness_tests/prune/without_lock/testcase.json b/cmd/dep/testdata/harness_tests/prune/without_lock/testcase.json new file mode 100644 index 0000000000..da6463ed6a --- /dev/null +++ b/cmd/dep/testdata/harness_tests/prune/without_lock/testcase.json @@ -0,0 +1,6 @@ +{ + "commands": [ + ["prune"] + ], + "error-expected": "Gopkg.lock must exist" +} diff --git a/cmd/dep/testdata/harness_tests/remove/force/case1/final/Gopkg.toml b/cmd/dep/testdata/harness_tests/remove/force/case1/final/Gopkg.toml deleted file mode 100644 index 413feb4eb0..0000000000 --- a/cmd/dep/testdata/harness_tests/remove/force/case1/final/Gopkg.toml +++ /dev/null @@ -1,11 +0,0 @@ -[[dependencies]] - name = "github.com/not/used" - version = "2.0.0" - -[[dependencies]] - name = "github.com/sdboyer/deptest" - version = ">=0.8.0, <1.0.0" - -[[dependencies]] - name = "github.com/sdboyer/deptestdos" - revision = "a0196baa11ea047dd65037287451d36b861b00ea" diff --git a/cmd/dep/testdata/harness_tests/remove/force/case1/initial/Gopkg.toml b/cmd/dep/testdata/harness_tests/remove/force/case1/initial/Gopkg.toml deleted file mode 100644 index 413feb4eb0..0000000000 --- a/cmd/dep/testdata/harness_tests/remove/force/case1/initial/Gopkg.toml +++ /dev/null @@ -1,11 +0,0 @@ -[[dependencies]] - name = "github.com/not/used" - version = "2.0.0" - -[[dependencies]] - name = "github.com/sdboyer/deptest" - version = ">=0.8.0, <1.0.0" - -[[dependencies]] - name = "github.com/sdboyer/deptestdos" - revision = "a0196baa11ea047dd65037287451d36b861b00ea" diff --git a/cmd/dep/testdata/harness_tests/remove/force/case1/testcase.json b/cmd/dep/testdata/harness_tests/remove/force/case1/testcase.json deleted file mode 100644 index 84447c9b42..0000000000 --- a/cmd/dep/testdata/harness_tests/remove/force/case1/testcase.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "commands": [ - ["remove", "-force", "github.com/sdboyer/deptestdos", "github.com/not/used"] - ], - "vendor-final": [ - "github.com/sdboyer/deptest", - "github.com/sdboyer/deptestdos" - ] -} diff --git a/cmd/dep/testdata/harness_tests/remove/specific/case1/final/Gopkg.lock b/cmd/dep/testdata/harness_tests/remove/specific/case1/final/Gopkg.lock deleted file mode 100644 index 89372cc518..0000000000 --- a/cmd/dep/testdata/harness_tests/remove/specific/case1/final/Gopkg.lock +++ /dev/null @@ -1,12 +0,0 @@ -memo = 
"d414dbf5fc668c1085effa68372d02e54b23d058cc66f9fd19ba094c6a946d9b" - -[[projects]] - name = "github.com/sdboyer/deptest" - packages = ["."] - revision = "ff2948a2ac8f538c4ecd55962e919d1e13e74baf" - version = "v1.0.0" - -[[projects]] - name = "github.com/sdboyer/deptestdos" - packages = ["."] - revision = "a0196baa11ea047dd65037287451d36b861b00ea" diff --git a/cmd/dep/testdata/harness_tests/remove/specific/case1/final/Gopkg.toml b/cmd/dep/testdata/harness_tests/remove/specific/case1/final/Gopkg.toml deleted file mode 100644 index 26653704de..0000000000 --- a/cmd/dep/testdata/harness_tests/remove/specific/case1/final/Gopkg.toml +++ /dev/null @@ -1,11 +0,0 @@ -[[dependencies]] - name = "github.com/not/used" - version = "2.0.0" - -[[dependencies]] -name = "github.com/sdboyer/deptest" - version = ">=0.8.0, <1.0.0" - -[[dependencies]] -name = "github.com/sdboyer/deptestdos" - revision = "a0196baa11ea047dd65037287451d36b861b00ea" \ No newline at end of file diff --git a/cmd/dep/testdata/harness_tests/remove/specific/case1/initial/Gopkg.toml b/cmd/dep/testdata/harness_tests/remove/specific/case1/initial/Gopkg.toml deleted file mode 100644 index 26653704de..0000000000 --- a/cmd/dep/testdata/harness_tests/remove/specific/case1/initial/Gopkg.toml +++ /dev/null @@ -1,11 +0,0 @@ -[[dependencies]] - name = "github.com/not/used" - version = "2.0.0" - -[[dependencies]] -name = "github.com/sdboyer/deptest" - version = ">=0.8.0, <1.0.0" - -[[dependencies]] -name = "github.com/sdboyer/deptestdos" - revision = "a0196baa11ea047dd65037287451d36b861b00ea" \ No newline at end of file diff --git a/cmd/dep/testdata/harness_tests/remove/specific/case1/testcase.json b/cmd/dep/testdata/harness_tests/remove/specific/case1/testcase.json deleted file mode 100644 index 327867d481..0000000000 --- a/cmd/dep/testdata/harness_tests/remove/specific/case1/testcase.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "commands": [ - ["remove", "github.com/not/used"] - ], - "vendor-final": [ - "github.com/sdboyer/deptest", - "github.com/sdboyer/deptestdos" - ] -} diff --git a/cmd/dep/testdata/harness_tests/remove/specific/case2/final/Gopkg.toml b/cmd/dep/testdata/harness_tests/remove/specific/case2/final/Gopkg.toml deleted file mode 100644 index 0bb0ab1d1b..0000000000 --- a/cmd/dep/testdata/harness_tests/remove/specific/case2/final/Gopkg.toml +++ /dev/null @@ -1,11 +0,0 @@ -[[dependencies]] - name = "github.com/not/used" - version = "2.0.0" - -[[dependencies]] - name = "github.com/sdboyer/deptest" - version = ">=0.8.0, <1.0.0" - -[[dependencies]] - name = "github.com/sdboyer/deptestdos" - revision = "a0196baa11ea047dd65037287451d36b861b00ea" \ No newline at end of file diff --git a/cmd/dep/testdata/harness_tests/remove/specific/case2/initial/Gopkg.toml b/cmd/dep/testdata/harness_tests/remove/specific/case2/initial/Gopkg.toml deleted file mode 100644 index 0bb0ab1d1b..0000000000 --- a/cmd/dep/testdata/harness_tests/remove/specific/case2/initial/Gopkg.toml +++ /dev/null @@ -1,11 +0,0 @@ -[[dependencies]] - name = "github.com/not/used" - version = "2.0.0" - -[[dependencies]] - name = "github.com/sdboyer/deptest" - version = ">=0.8.0, <1.0.0" - -[[dependencies]] - name = "github.com/sdboyer/deptestdos" - revision = "a0196baa11ea047dd65037287451d36b861b00ea" \ No newline at end of file diff --git a/cmd/dep/testdata/harness_tests/remove/unused/case1/final/Gopkg.lock b/cmd/dep/testdata/harness_tests/remove/unused/case1/final/Gopkg.lock deleted file mode 100644 index 89372cc518..0000000000 --- 
a/cmd/dep/testdata/harness_tests/remove/unused/case1/final/Gopkg.lock +++ /dev/null @@ -1,12 +0,0 @@ -memo = "d414dbf5fc668c1085effa68372d02e54b23d058cc66f9fd19ba094c6a946d9b" - -[[projects]] - name = "github.com/sdboyer/deptest" - packages = ["."] - revision = "ff2948a2ac8f538c4ecd55962e919d1e13e74baf" - version = "v1.0.0" - -[[projects]] - name = "github.com/sdboyer/deptestdos" - packages = ["."] - revision = "a0196baa11ea047dd65037287451d36b861b00ea" diff --git a/cmd/dep/testdata/harness_tests/remove/unused/case1/final/Gopkg.toml b/cmd/dep/testdata/harness_tests/remove/unused/case1/final/Gopkg.toml deleted file mode 100644 index 0bb0ab1d1b..0000000000 --- a/cmd/dep/testdata/harness_tests/remove/unused/case1/final/Gopkg.toml +++ /dev/null @@ -1,11 +0,0 @@ -[[dependencies]] - name = "github.com/not/used" - version = "2.0.0" - -[[dependencies]] - name = "github.com/sdboyer/deptest" - version = ">=0.8.0, <1.0.0" - -[[dependencies]] - name = "github.com/sdboyer/deptestdos" - revision = "a0196baa11ea047dd65037287451d36b861b00ea" \ No newline at end of file diff --git a/cmd/dep/testdata/harness_tests/remove/unused/case1/initial/Gopkg.toml b/cmd/dep/testdata/harness_tests/remove/unused/case1/initial/Gopkg.toml deleted file mode 100644 index 0bb0ab1d1b..0000000000 --- a/cmd/dep/testdata/harness_tests/remove/unused/case1/initial/Gopkg.toml +++ /dev/null @@ -1,11 +0,0 @@ -[[dependencies]] - name = "github.com/not/used" - version = "2.0.0" - -[[dependencies]] - name = "github.com/sdboyer/deptest" - version = ">=0.8.0, <1.0.0" - -[[dependencies]] - name = "github.com/sdboyer/deptestdos" - revision = "a0196baa11ea047dd65037287451d36b861b00ea" \ No newline at end of file diff --git a/cmd/dep/testdata/harness_tests/remove/unused/case1/testcase.json b/cmd/dep/testdata/harness_tests/remove/unused/case1/testcase.json deleted file mode 100644 index 23cc6142a5..0000000000 --- a/cmd/dep/testdata/harness_tests/remove/unused/case1/testcase.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "commands": [ - ["remove", "-unused"] - ], - "vendor-final": [ - "github.com/sdboyer/deptest", - "github.com/sdboyer/deptestdos" - ] -} diff --git a/cmd/dep/testdata/harness_tests/status/case1/dot/final/Gopkg.lock b/cmd/dep/testdata/harness_tests/status/case1/dot/final/Gopkg.lock index 9b7e4cbf29..77278d07bc 100644 --- a/cmd/dep/testdata/harness_tests/status/case1/dot/final/Gopkg.lock +++ b/cmd/dep/testdata/harness_tests/status/case1/dot/final/Gopkg.lock @@ -1,4 +1,5 @@ -memo = "88d2718cda70cce45158f953d2c6ead79c1db38e67e9704aff72be8fddb096e7" +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ [[projects]] name = "github.com/sdboyer/deptest" @@ -11,3 +12,10 @@ memo = "88d2718cda70cce45158f953d2c6ead79c1db38e67e9704aff72be8fddb096e7" packages = ["."] revision = "5c607206be5decd28e6263ffffdcee067266015e" version = "v2.0.0" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "1b381263a360eafafe3ef7f9be626672668d17250a3c9a8debd169d1b5e2eebb" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/cmd/dep/testdata/harness_tests/status/case1/dot/final/Gopkg.toml b/cmd/dep/testdata/harness_tests/status/case1/dot/final/Gopkg.toml index 122d0340fd..94deb714a4 100644 --- a/cmd/dep/testdata/harness_tests/status/case1/dot/final/Gopkg.toml +++ b/cmd/dep/testdata/harness_tests/status/case1/dot/final/Gopkg.toml @@ -1,3 +1,3 @@ -[[dependencies]] +[[constraint]] name = "github.com/sdboyer/deptest" version = "^0.8.0" diff --git a/cmd/dep/testdata/harness_tests/status/case1/dot/initial/Gopkg.toml b/cmd/dep/testdata/harness_tests/status/case1/dot/initial/Gopkg.toml index 122d0340fd..94deb714a4 100644 --- a/cmd/dep/testdata/harness_tests/status/case1/dot/initial/Gopkg.toml +++ b/cmd/dep/testdata/harness_tests/status/case1/dot/initial/Gopkg.toml @@ -1,3 +1,3 @@ -[[dependencies]] +[[constraint]] name = "github.com/sdboyer/deptest" version = "^0.8.0" diff --git a/cmd/dep/testdata/harness_tests/status/case1/dot/testcase.json b/cmd/dep/testdata/harness_tests/status/case1/dot/testcase.json index 8295f9bed1..9634c3981a 100644 --- a/cmd/dep/testdata/harness_tests/status/case1/dot/testcase.json +++ b/cmd/dep/testdata/harness_tests/status/case1/dot/testcase.json @@ -1,8 +1,9 @@ { "commands": [ ["ensure"], - ["status","-dot"] + ["status", "-dot"] ], + "error-expected": "", "vendor-final": [ "github.com/sdboyer/deptest", "github.com/sdboyer/deptestdos" diff --git a/cmd/dep/testdata/harness_tests/status/case1/json/final/Gopkg.lock b/cmd/dep/testdata/harness_tests/status/case1/json/final/Gopkg.lock index 9b7e4cbf29..77278d07bc 100644 --- a/cmd/dep/testdata/harness_tests/status/case1/json/final/Gopkg.lock +++ b/cmd/dep/testdata/harness_tests/status/case1/json/final/Gopkg.lock @@ -1,4 +1,5 @@ -memo = "88d2718cda70cce45158f953d2c6ead79c1db38e67e9704aff72be8fddb096e7" +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ [[projects]] name = "github.com/sdboyer/deptest" @@ -11,3 +12,10 @@ memo = "88d2718cda70cce45158f953d2c6ead79c1db38e67e9704aff72be8fddb096e7" packages = ["."] revision = "5c607206be5decd28e6263ffffdcee067266015e" version = "v2.0.0" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "1b381263a360eafafe3ef7f9be626672668d17250a3c9a8debd169d1b5e2eebb" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/cmd/dep/testdata/harness_tests/status/case1/json/final/Gopkg.toml b/cmd/dep/testdata/harness_tests/status/case1/json/final/Gopkg.toml index 122d0340fd..94deb714a4 100644 --- a/cmd/dep/testdata/harness_tests/status/case1/json/final/Gopkg.toml +++ b/cmd/dep/testdata/harness_tests/status/case1/json/final/Gopkg.toml @@ -1,3 +1,3 @@ -[[dependencies]] +[[constraint]] name = "github.com/sdboyer/deptest" version = "^0.8.0" diff --git a/cmd/dep/testdata/harness_tests/status/case1/json/initial/Gopkg.toml b/cmd/dep/testdata/harness_tests/status/case1/json/initial/Gopkg.toml index 122d0340fd..94deb714a4 100644 --- a/cmd/dep/testdata/harness_tests/status/case1/json/initial/Gopkg.toml +++ b/cmd/dep/testdata/harness_tests/status/case1/json/initial/Gopkg.toml @@ -1,3 +1,3 @@ -[[dependencies]] +[[constraint]] name = "github.com/sdboyer/deptest" version = "^0.8.0" diff --git a/cmd/dep/testdata/harness_tests/status/case1/json/testcase.json b/cmd/dep/testdata/harness_tests/status/case1/json/testcase.json index 2444d2888c..9e1a0643b7 100644 --- a/cmd/dep/testdata/harness_tests/status/case1/json/testcase.json +++ b/cmd/dep/testdata/harness_tests/status/case1/json/testcase.json @@ -1,8 +1,9 @@ { "commands": [ ["ensure"], - ["status","-json"] + ["status", "-json"] ], + "error-expected": "", "vendor-final": [ "github.com/sdboyer/deptest", "github.com/sdboyer/deptestdos" diff --git a/cmd/dep/testdata/harness_tests/status/case1/table/final/Gopkg.lock b/cmd/dep/testdata/harness_tests/status/case1/table/final/Gopkg.lock index 9b7e4cbf29..77278d07bc 100644 --- a/cmd/dep/testdata/harness_tests/status/case1/table/final/Gopkg.lock +++ b/cmd/dep/testdata/harness_tests/status/case1/table/final/Gopkg.lock @@ -1,4 +1,5 @@ -memo = "88d2718cda70cce45158f953d2c6ead79c1db38e67e9704aff72be8fddb096e7" +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ [[projects]] name = "github.com/sdboyer/deptest" @@ -11,3 +12,10 @@ memo = "88d2718cda70cce45158f953d2c6ead79c1db38e67e9704aff72be8fddb096e7" packages = ["."] revision = "5c607206be5decd28e6263ffffdcee067266015e" version = "v2.0.0" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "1b381263a360eafafe3ef7f9be626672668d17250a3c9a8debd169d1b5e2eebb" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/cmd/dep/testdata/harness_tests/status/case1/table/final/Gopkg.toml b/cmd/dep/testdata/harness_tests/status/case1/table/final/Gopkg.toml index 122d0340fd..94deb714a4 100644 --- a/cmd/dep/testdata/harness_tests/status/case1/table/final/Gopkg.toml +++ b/cmd/dep/testdata/harness_tests/status/case1/table/final/Gopkg.toml @@ -1,3 +1,3 @@ -[[dependencies]] +[[constraint]] name = "github.com/sdboyer/deptest" version = "^0.8.0" diff --git a/cmd/dep/testdata/harness_tests/status/case1/table/initial/Gopkg.toml b/cmd/dep/testdata/harness_tests/status/case1/table/initial/Gopkg.toml index 122d0340fd..94deb714a4 100644 --- a/cmd/dep/testdata/harness_tests/status/case1/table/initial/Gopkg.toml +++ b/cmd/dep/testdata/harness_tests/status/case1/table/initial/Gopkg.toml @@ -1,3 +1,3 @@ -[[dependencies]] +[[constraint]] name = "github.com/sdboyer/deptest" version = "^0.8.0" diff --git a/cmd/dep/testdata/harness_tests/status/case1/table/stdout.txt b/cmd/dep/testdata/harness_tests/status/case1/table/stdout.txt index f78a4eb4b1..f77f65a927 100644 --- a/cmd/dep/testdata/harness_tests/status/case1/table/stdout.txt +++ b/cmd/dep/testdata/harness_tests/status/case1/table/stdout.txt @@ -1,3 +1,3 @@ -PROJECT CONSTRAINT VERSION REVISION LATEST PKGS USED -github.com/sdboyer/deptest >=0.8.0, <1.0.0 v0.8.0 ff2948a 3f4c3be 1 -github.com/sdboyer/deptestdos * v2.0.0 5c60720 5c60720 1 +PROJECT CONSTRAINT VERSION REVISION LATEST PKGS USED +github.com/sdboyer/deptest ^0.8.0 v0.8.0 ff2948a 3f4c3be 1 +github.com/sdboyer/deptestdos * v2.0.0 5c60720 5c60720 1 diff --git a/cmd/dep/testdata/harness_tests/status/case1/table/testcase.json b/cmd/dep/testdata/harness_tests/status/case1/table/testcase.json index 813db298bb..e1f1eadeeb 100644 --- a/cmd/dep/testdata/harness_tests/status/case1/table/testcase.json +++ b/cmd/dep/testdata/harness_tests/status/case1/table/testcase.json @@ -3,6 +3,7 @@ ["ensure"], ["status"] ], + "error-expected": "", "vendor-final": [ "github.com/sdboyer/deptest", "github.com/sdboyer/deptestdos" diff --git a/cmd/dep/testdata/init_path_tests/relative_path/final/project_dir/Gopkg.lock b/cmd/dep/testdata/init_path_tests/relative_path/final/project_dir/Gopkg.lock new file mode 100644 index 0000000000..f9f6c4c7f8 --- /dev/null +++ b/cmd/dep/testdata/init_path_tests/relative_path/final/project_dir/Gopkg.lock @@ -0,0 +1,14 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
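In the stdout.txt change above, the status table now prints the constraint as the user wrote it (^0.8.0) instead of the expanded range; for a pre-1.0 version the two denote the same set of releases, as the old and new table rows show. A small sketch of that range check, assuming the Masterminds/semver library dep builds on (the caret-to-range expansion is taken from the table diff itself, not from the library docs):

package main

import (
	"fmt"

	"github.com/Masterminds/semver"
)

func main() {
	// ^0.8.0 in Gopkg.toml corresponds to this range per the status output.
	c, err := semver.NewConstraint(">=0.8.0, <1.0.0")
	if err != nil {
		panic(err)
	}

	for _, s := range []string{"0.8.0", "0.9.9", "1.0.0"} {
		fmt.Printf("%s satisfies ^0.8.0: %t\n", s, c.Check(semver.MustParse(s)))
	}
	// 0.8.0 satisfies ^0.8.0: true
	// 0.9.9 satisfies ^0.8.0: true
	// 1.0.0 satisfies ^0.8.0: false
}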
+ +memo = "af9a783a5430dabcaaf44683c09e2b729e1c0d61f13bfdf6677c4fd0b41387ca" + +[[projects]] + branch = "master" + name = "github.com/sdboyer/deptest" + packages = ["."] + revision = "3f4c3bea144e112a69bbe5d8d01c1b09a544253f" + +[[projects]] + name = "github.com/sdboyer/deptestdos" + packages = ["."] + revision = "a0196baa11ea047dd65037287451d36b861b00ea" diff --git a/cmd/dep/testdata/init_path_tests/relative_path/final/project_dir/Gopkg.toml b/cmd/dep/testdata/init_path_tests/relative_path/final/project_dir/Gopkg.toml new file mode 100644 index 0000000000..a90dd2dadd --- /dev/null +++ b/cmd/dep/testdata/init_path_tests/relative_path/final/project_dir/Gopkg.toml @@ -0,0 +1,4 @@ + +[[constraint]] + branch = "master" + name = "github.com/sdboyer/deptest" diff --git a/cmd/dep/testdata/init_path_tests/relative_path/initial/project_dir/foo/bar.go b/cmd/dep/testdata/init_path_tests/relative_path/initial/project_dir/foo/bar.go new file mode 100644 index 0000000000..c1ed69fc65 --- /dev/null +++ b/cmd/dep/testdata/init_path_tests/relative_path/initial/project_dir/foo/bar.go @@ -0,0 +1,13 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package foo + +import "github.com/sdboyer/deptest" + +func Foo() deptest.Foo { + var y deptest.Foo + + return y +} diff --git a/cmd/dep/testdata/harness_tests/remove/unused/case1/initial/main.go b/cmd/dep/testdata/init_path_tests/relative_path/initial/project_dir/main.go similarity index 51% rename from cmd/dep/testdata/harness_tests/remove/unused/case1/initial/main.go rename to cmd/dep/testdata/init_path_tests/relative_path/initial/project_dir/main.go index 2eae5b511d..150e0bd3df 100644 --- a/cmd/dep/testdata/harness_tests/remove/unused/case1/initial/main.go +++ b/cmd/dep/testdata/init_path_tests/relative_path/initial/project_dir/main.go @@ -1,18 +1,18 @@ -// Copyright 2016 The Go Authors. All rights reserved. +// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
package main import ( - "github.com/sdboyer/deptest" + "fmt" + "github.com/sdboyer/deptestdos" ) func main() { - err := nil - if err != nil { - deptest.Map["yo yo!"] - } - deptestdos.diMeLo("whatev") + var x deptestdos.Bar + y := foo.FooFunc() + + fmt.Println(x, y) } diff --git a/cmd/dep/testdata/init_path_tests/relative_path/testcase.json b/cmd/dep/testdata/init_path_tests/relative_path/testcase.json new file mode 100644 index 0000000000..6fa477b508 --- /dev/null +++ b/cmd/dep/testdata/init_path_tests/relative_path/testcase.json @@ -0,0 +1,7 @@ +{ + "commands": [ + ["init", "-skip-tools", "project_dir"] + ], + "error-expected": "", + "init-path": "project_dir" +} diff --git a/context.go b/context.go index c915bc9608..0aa96afbd4 100644 --- a/context.go +++ b/context.go @@ -12,7 +12,7 @@ import ( "strings" "github.com/Masterminds/vcs" - "github.com/golang/dep/internal" + "github.com/golang/dep/internal/fs" "github.com/golang/dep/internal/gps" "github.com/pkg/errors" ) @@ -44,7 +44,7 @@ func NewContext(wd string, env []string, loggers *Loggers) (*Ctx, error) { for _, gp := range filepath.SplitList(GOPATH) { gp = filepath.FromSlash(gp) - if internal.HasFilepathPrefix(filepath.FromSlash(wd), gp) { + if fs.HasFilepathPrefix(filepath.FromSlash(wd), gp) { ctx.GOPATH = gp } @@ -98,32 +98,18 @@ func (c *Ctx) SourceManager() (*gps.SourceMgr, error) { return gps.NewSourceManager(filepath.Join(c.GOPATH, "pkg", "dep")) } -// LoadProject takes a path and searches up the directory tree for -// a project root. If an absolute path is given, the search begins in that -// directory. If a relative or empty path is given, the search start is computed -// from the current working directory. The search stops when a file with the -// name ManifestName (Gopkg.toml, by default) is located. +// LoadProject starts from the current working directory and searches up the +// directory tree for a project root. The search stops when a file with the name +// ManifestName (Gopkg.toml, by default) is located. // // The Project contains the parsed manifest as well as a parsed lock file, if // present. The import path is calculated as the remaining path segment // below Ctx.GOPATH/src. -func (c *Ctx) LoadProject(path string) (*Project, error) { +func (c *Ctx) LoadProject() (*Project, error) { var err error p := new(Project) - if path != "" { - path, err = filepath.Abs(path) - if err != nil { - return nil, err - } - } - switch path { - case "": - p.AbsRoot, err = findProjectRoot(c.WorkingDir) - default: - p.AbsRoot, err = findProjectRoot(path) - } - + p.AbsRoot, err = findProjectRoot(c.WorkingDir) if err != nil { return nil, err } @@ -211,7 +197,7 @@ func (c *Ctx) resolveProjectRoot(path string) (string, error) { // Determine if the symlink is within any of the GOPATHs, in which case we're not // sure how to resolve it. for _, gp := range c.GOPATHS { - if internal.HasFilepathPrefix(path, gp) { + if fs.HasFilepathPrefix(path, gp) { return "", errors.Errorf("'%s' is linked to another path within a GOPATH (%s)", path, gp) } } @@ -226,7 +212,7 @@ func (c *Ctx) resolveProjectRoot(path string) (string, error) { // The second returned string indicates which GOPATH value was used. 
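The context.go change above removes LoadProject's path argument: project discovery now always starts at the context's working directory and walks upward until it finds Gopkg.toml. A hedged sketch of the resulting call-site shape, using only the signatures visible in this diff (logger wiring is elided and is an assumption of the sketch):

package main

import (
	"log"
	"os"

	"github.com/golang/dep"
)

func main() {
	wd, err := os.Getwd()
	if err != nil {
		log.Fatal(err)
	}

	// NewContext derives GOPATH from the environment it is handed.
	ctx, err := dep.NewContext(wd, os.Environ(), nil) // loggers elided in this sketch
	if err != nil {
		log.Fatal(err)
	}

	// No path argument anymore: the search starts at ctx.WorkingDir and
	// stops at the first directory containing Gopkg.toml.
	proj, err := ctx.LoadProject()
	if err != nil {
		log.Fatal(err)
	}

	log.Println("project root:", proj.AbsRoot)
}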
func (c *Ctx) SplitAbsoluteProjectRoot(path string) (string, error) { srcprefix := filepath.Join(c.GOPATH, "src") + string(filepath.Separator) - if internal.HasFilepathPrefix(path, srcprefix) { + if fs.HasFilepathPrefix(path, srcprefix) { if len(path) <= len(srcprefix) { return "", errors.New("dep does not currently support using $GOPATH/src as the project root.") } @@ -244,7 +230,7 @@ func (c *Ctx) SplitAbsoluteProjectRoot(path string) (string, error) { // package directory needs to exist. func (c *Ctx) absoluteProjectRoot(path string) (string, error) { posspath := filepath.Join(c.GOPATH, "src", path) - dirOK, err := IsDir(posspath) + dirOK, err := fs.IsDir(posspath) if err != nil { return "", errors.Wrapf(err, "checking if %s is a directory", posspath) } diff --git a/context_test.go b/context_test.go index 951c233da4..9709538031 100644 --- a/context_test.go +++ b/context_test.go @@ -188,47 +188,29 @@ func TestLoadProject(t *testing.T) { var testcases = []struct { lock bool start string - path string }{ - {true, filepath.Join("src", "test1"), ""}, //empty path, direct - {true, filepath.Join("src", "test1", "sub"), ""}, //empty path, ascending - {true, ".", filepath.Join(tg.Path("."), "src", "test1")}, //absolute path, direct - {true, ".", filepath.Join(tg.Path("."), "src", "test1", "sub")}, //absolute path, ascending - {true, ".", filepath.Join("src", "test1")}, //relative path from wd, direct - {true, ".", filepath.Join("src", "test1", "sub")}, //relative path from wd, ascending - {true, "src", "test1"}, //relative path from relative path, direct - {true, "src", filepath.Join("test1", "sub")}, //relative path from relative path, ascending - {false, filepath.Join("src", "test2"), ""}, //repeat without lockfile present - {false, filepath.Join("src", "test2", "sub"), ""}, - {false, ".", filepath.Join(tg.Path("."), "src", "test2")}, - {false, ".", filepath.Join(tg.Path("."), "src", "test2", "sub")}, - {false, ".", filepath.Join("src", "test2")}, - {false, ".", filepath.Join("src", "test2", "sub")}, - {false, "src", "test2"}, - {false, "src", filepath.Join("test2", "sub")}, + {true, filepath.Join("src", "test1")}, //direct + {true, filepath.Join("src", "test1", "sub")}, //ascending + {false, filepath.Join("src", "test2")}, //repeat without lockfile present + {false, filepath.Join("src", "test2", "sub")}, } for _, testcase := range testcases { start := testcase.start - path := testcase.path - tg.Cd(tg.Path(start)) - wd, err := os.Getwd() - if err != nil { - t.Fatal("failed to get working directory", err) - } + ctx := &Ctx{GOPATH: tg.Path("."), WorkingDir: tg.Path(start), Loggers: discardLoggers} - ctx := &Ctx{GOPATH: tg.Path("."), WorkingDir: wd, Loggers: discardLoggers} - - proj, err := ctx.LoadProject(path) + proj, err := ctx.LoadProject() tg.Must(err) - if proj.Manifest == nil { - t.Fatalf("Manifest file didn't load -> from: %q, path: %q", start, path) - } - if testcase.lock && proj.Lock == nil { - t.Fatalf("Lock file didn't load -> from: %q, path: %q", start, path) - } else if !testcase.lock && proj.Lock != nil { - t.Fatalf("Non-existent Lock file loaded -> from: %q, path: %q", start, path) + switch { + case err != nil: + t.Errorf("%s: LoadProject failed: %+v", start, err) + case proj.Manifest == nil: + t.Errorf("%s: Manifest file didn't load", start) + case testcase.lock && proj.Lock == nil: + t.Errorf("%s: Lock file didn't load", start) + case !testcase.lock && proj.Lock != nil: + t.Errorf("%s: Non-existent Lock file loaded", start) } } } @@ -247,31 +229,16 @@ func 
TestLoadProjectNotFoundErrors(t *testing.T) { start string path string }{ - {true, filepath.Join("src", "test1"), ""}, //empty path, direct - {true, filepath.Join("src", "test1", "sub"), ""}, //empty path, ascending - {true, ".", filepath.Join(tg.Path("."), "src", "test1")}, //absolute path, direct - {true, ".", filepath.Join(tg.Path("."), "src", "test1", "sub")}, //absolute path, ascending - {true, ".", filepath.Join("src", "test1")}, //relative path from wd, direct - {true, ".", filepath.Join("src", "test1", "sub")}, //relative path from wd, ascending - {true, "src", "test1"}, //relative path from relative path, direct - {true, "src", filepath.Join("test1", "sub")}, //relative path from relative path, ascending + {true, filepath.Join("src", "test1"), ""}, //direct + {true, filepath.Join("src", "test1", "sub"), ""}, //ascending } for _, testcase := range testcases { - start := testcase.start - path := testcase.path - tg.Cd(tg.Path(start)) - - wd, err := os.Getwd() - if err != nil { - t.Fatal("failed to get working directory", err) - } - - ctx := &Ctx{GOPATH: tg.Path("."), WorkingDir: wd} + ctx := &Ctx{GOPATH: tg.Path("."), WorkingDir: tg.Path(testcase.start)} - _, err = ctx.LoadProject(path) + _, err := ctx.LoadProject() if err == nil { - t.Fatalf("should have returned 'No Manifest Found' error -> from: %q, path: %q", start, path) + t.Errorf("%s: should have returned 'No Manifest Found' error", testcase.start) } } } @@ -282,7 +249,7 @@ func TestLoadProjectManifestParseError(t *testing.T) { tg.TempDir("src") tg.TempDir("src/test1") - tg.TempFile(filepath.Join("src/test1", ManifestName), `[[dependencies]]`) + tg.TempFile(filepath.Join("src/test1", ManifestName), `[[constraint]]`) tg.TempFile(filepath.Join("src/test1", LockName), `memo = "cdafe8641b28cd16fe025df278b0a49b9416859345d8b6ba0ace0272b74925ee"\n\n[[projects]]`) tg.Setenv("GOPATH", tg.Path(".")) @@ -296,7 +263,7 @@ func TestLoadProjectManifestParseError(t *testing.T) { ctx := &Ctx{GOPATH: tg.Path("."), WorkingDir: wd, Loggers: discardLoggers} - _, err = ctx.LoadProject("") + _, err = ctx.LoadProject() if err == nil { t.Fatal("should have returned 'Manifest Syntax' error") } @@ -308,7 +275,7 @@ func TestLoadProjectLockParseError(t *testing.T) { tg.TempDir("src") tg.TempDir("src/test1") - tg.TempFile(filepath.Join("src/test1", ManifestName), `[[dependencies]]`) + tg.TempFile(filepath.Join("src/test1", ManifestName), `[[constraint]]`) tg.TempFile(filepath.Join("src/test1", LockName), `memo = "cdafe8641b28cd16fe025df278b0a49b9416859345d8b6ba0ace0272b74925ee"\n\n[[projects]]`) tg.Setenv("GOPATH", tg.Path(".")) @@ -322,7 +289,7 @@ func TestLoadProjectLockParseError(t *testing.T) { ctx := &Ctx{GOPATH: tg.Path("."), WorkingDir: wd, Loggers: discardLoggers} - _, err = ctx.LoadProject("") + _, err = ctx.LoadProject() if err == nil { t.Fatal("should have returned 'Lock Syntax' error") } @@ -333,7 +300,7 @@ func TestLoadProjectNoSrcDir(t *testing.T) { defer tg.Cleanup() tg.TempDir("test1") - tg.TempFile(filepath.Join("test1", ManifestName), `[[dependencies]]`) + tg.TempFile(filepath.Join("test1", ManifestName), `[[constraint]]`) tg.TempFile(filepath.Join("test1", LockName), `memo = "cdafe8641b28cd16fe025df278b0a49b9416859345d8b6ba0ace0272b74925ee"\n\n[[projects]]`) tg.Setenv("GOPATH", tg.Path(".")) @@ -344,7 +311,7 @@ func TestLoadProjectNoSrcDir(t *testing.T) { f, _ := os.OpenFile(filepath.Join(ctx.GOPATH, "src", "test1", LockName), os.O_WRONLY, os.ModePerm) defer f.Close() - _, err := ctx.LoadProject("") + _, err := ctx.LoadProject() if err 
== nil { t.Fatal("should have returned 'Split Absolute Root' error (no 'src' dir present)") } @@ -362,7 +329,7 @@ func TestCaseInsentitiveGOPATH(t *testing.T) { h.TempDir("src") h.TempDir("src/test1") - h.TempFile(filepath.Join("src/test1", ManifestName), `[[dependencies]]`) + h.TempFile(filepath.Join("src/test1", ManifestName), `[[constraint]]`) // Shuffle letter case rs := []rune(strings.ToLower(h.Path("."))) @@ -381,7 +348,7 @@ func TestCaseInsentitiveGOPATH(t *testing.T) { } depCtx := &Ctx{GOPATH: gopath, WorkingDir: wd} - depCtx.LoadProject("") + depCtx.LoadProject() ip := "github.com/pkg/errors" fullpath := filepath.Join(depCtx.GOPATH, "src", ip) diff --git a/fs.go b/fs.go deleted file mode 100644 index aea991f1d4..0000000000 --- a/fs.go +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package dep - -import ( - "io" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "syscall" - - "github.com/golang/dep/internal" - "github.com/pelletier/go-toml" - "github.com/pkg/errors" -) - -func IsRegular(name string) (bool, error) { - // TODO: lstat? - fi, err := os.Stat(name) - if os.IsNotExist(err) { - return false, nil - } - if err != nil { - return false, err - } - if fi.IsDir() { - return false, errors.Errorf("%q is a directory, should be a file", name) - } - return true, nil -} - -func IsDir(name string) (bool, error) { - return internal.IsDir(name) -} - -func IsNonEmptyDir(name string) (bool, error) { - isDir, err := IsDir(name) - if !isDir || err != nil { - return isDir, err - } - - // Get file descriptor - f, err := os.Open(name) - if err != nil { - return false, err - } - defer f.Close() - - // Query only 1 child. EOF if no children. - _, err = f.Readdirnames(1) - switch err { - case io.EOF: - return false, nil - case nil: - return true, nil - default: - return false, err - } -} - -func writeFile(path string, in toml.Marshaler) error { - f, err := os.Create(path) - if err != nil { - return err - } - defer f.Close() - - s, err := in.MarshalTOML() - if err != nil { - return err - } - - _, err = f.Write(s) - return err -} - -// modifyWithString modifies a given file with a new string input. -// This is used to write arbitrary string data to a file, such as -// updating the `Gopkg.toml` file with example data if no deps found -// on init. -func modifyWithString(path, data string) error { - return ioutil.WriteFile(path, []byte(data), 0644) -} - -// renameWithFallback attempts to rename a file or directory, but falls back to -// copying in the event of a cross-link device error. If the fallback copy -// succeeds, src is still removed, emulating normal rename behavior. -func renameWithFallback(src, dest string) error { - fi, err := os.Lstat(src) - if err != nil { - return errors.Wrapf(err, "cannot stat %s", src) - } - - // Windows cannot use syscall.Rename to rename a directory - if runtime.GOOS == "windows" && fi.IsDir() { - if err := CopyDir(src, dest); err != nil { - return err - } - return errors.Wrapf(os.RemoveAll(src), "cannot delete %s", src) - } - - err = os.Rename(src, dest) - if err == nil { - return nil - } - - terr, ok := err.(*os.LinkError) - if !ok { - return errors.Wrapf(err, "cannot rename %s to %s", src, dest) - } - - // Rename may fail if src and dest are on different devices; fall back to - // copy if we detect that case. 
syscall.EXDEV is the common name for the - // cross device link error which has varying output text across different - // operating systems. - var cerr error - if terr.Err == syscall.EXDEV { - if fi.IsDir() { - cerr = CopyDir(src, dest) - } else { - cerr = CopyFile(src, dest) - } - } else if runtime.GOOS == "windows" { - // In windows it can drop down to an operating system call that - // returns an operating system error with a different number and - // message. Checking for that as a fall back. - noerr, ok := terr.Err.(syscall.Errno) - // 0x11 (ERROR_NOT_SAME_DEVICE) is the windows error. - // See https://msdn.microsoft.com/en-us/library/cc231199.aspx - if ok && noerr == 0x11 { - cerr = CopyFile(src, dest) - } - } else { - return errors.Wrapf(terr, "link error: cannot rename %s to %s", src, dest) - } - - if cerr != nil { - return errors.Wrapf(cerr, "second attemp failed: cannot rename %s to %s", src, dest) - } - - return errors.Wrapf(os.RemoveAll(src), "cannot delete %s", src) -} - -// CopyDir takes in a directory and copies its contents to the destination. -// It preserves the file mode on files as well. -func CopyDir(src string, dest string) error { - fi, err := os.Lstat(src) - if err != nil { - return errors.Wrapf(err, "cannot stat %s", src) - } - - err = os.MkdirAll(dest, fi.Mode()) - if err != nil { - return errors.Wrapf(err, "cannot mkdir %s", dest) - } - - dir, err := os.Open(src) - if err != nil { - return errors.Wrapf(err, "cannot open %s", src) - } - defer dir.Close() - - objects, err := dir.Readdir(-1) - if err != nil { - return errors.Wrapf(err, "cannot read directory %s", dir.Name()) - } - - for _, obj := range objects { - if obj.Mode()&os.ModeSymlink != 0 { - continue - } - - srcfile := filepath.Join(src, obj.Name()) - destfile := filepath.Join(dest, obj.Name()) - - if obj.IsDir() { - err = CopyDir(srcfile, destfile) - if err != nil { - return err - } - continue - } - - if err := CopyFile(srcfile, destfile); err != nil { - return err - } - } - - return nil -} - -// CopyFile copies a file from one place to another with the permission bits -// preserved as well. -func CopyFile(src string, dest string) error { - srcfile, err := os.Open(src) - if err != nil { - return err - } - defer srcfile.Close() - - destfile, err := os.Create(dest) - if err != nil { - return err - } - defer destfile.Close() - - if _, err := io.Copy(destfile, srcfile); err != nil { - return err - } - - srcinfo, err := os.Stat(src) - if err != nil { - return err - } - - return os.Chmod(dest, srcinfo.Mode()) -} diff --git a/fs_test.go b/fs_test.go deleted file mode 100644 index 89da68aab4..0000000000 --- a/fs_test.go +++ /dev/null @@ -1,500 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package dep - -import ( - "io/ioutil" - "os" - "path/filepath" - "runtime" - "testing" - - "github.com/golang/dep/internal/test" -) - -func TestCopyDir(t *testing.T) { - dir, err := ioutil.TempDir("", "dep") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - - srcdir := filepath.Join(dir, "src") - if err = os.MkdirAll(srcdir, 0755); err != nil { - t.Fatal(err) - } - - files := []struct { - path string - contents string - fi os.FileInfo - }{ - {path: "myfile", contents: "hello world"}, - {path: filepath.Join("subdir", "file"), contents: "subdir file"}, - } - - // Create structure indicated in 'files' - for i, file := range files { - fn := filepath.Join(srcdir, file.path) - dn := filepath.Dir(fn) - if err = os.MkdirAll(dn, 0755); err != nil { - t.Fatal(err) - } - - fh, err := os.Create(fn) - if err != nil { - t.Fatal(err) - } - - if _, err = fh.Write([]byte(file.contents)); err != nil { - t.Fatal(err) - } - fh.Close() - - files[i].fi, err = os.Stat(fn) - if err != nil { - t.Fatal(err) - } - } - - destdir := filepath.Join(dir, "dest") - if err = CopyDir(srcdir, destdir); err != nil { - t.Fatal(err) - } - - // Compare copy against structure indicated in 'files' - for _, file := range files { - fn := filepath.Join(srcdir, file.path) - dn := filepath.Dir(fn) - dirOK, err := IsDir(dn) - if err != nil { - t.Fatal(err) - } - if !dirOK { - t.Fatalf("expected %s to be a directory", dn) - } - - got, err := ioutil.ReadFile(fn) - if err != nil { - t.Fatal(err) - } - - if file.contents != string(got) { - t.Fatalf("expected: %s, got: %s", file.contents, string(got)) - } - - gotinfo, err := os.Stat(fn) - if err != nil { - t.Fatal(err) - } - - if file.fi.Mode() != gotinfo.Mode() { - t.Fatalf("expected %s: %#v\n to be the same mode as %s: %#v", - file.path, file.fi.Mode(), fn, gotinfo.Mode()) - } - } -} - -func TestCopyDirFailSrc(t *testing.T) { - if runtime.GOOS == "windows" { - // XXX: setting permissions works differently in - // Microsoft Windows. Skipping this this until a - // compatible implementation is provided. - t.Skip("skipping on windows") - } - - var srcdir, dstdir string - - err, cleanup := setupInaccesibleDir(func(dir string) (err error) { - srcdir = filepath.Join(dir, "src") - return os.MkdirAll(srcdir, 0755) - }) - - defer cleanup() - - if err != nil { - t.Fatal(err) - } - - dir, err := ioutil.TempDir("", "dep") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - - dstdir = filepath.Join(dir, "dst") - if err = CopyDir(srcdir, dstdir); err == nil { - t.Fatalf("expected error for CopyDir(%s, %s), got none", srcdir, dstdir) - } -} - -func TestCopyDirFailDst(t *testing.T) { - if runtime.GOOS == "windows" { - // XXX: setting permissions works differently in - // Microsoft Windows. Skipping this this until a - // compatible implementation is provided. 
- t.Skip("skipping on windows") - } - - var srcdir, dstdir string - - dir, err := ioutil.TempDir("", "dep") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - - srcdir = filepath.Join(dir, "src") - if err = os.MkdirAll(srcdir, 0755); err != nil { - t.Fatal(err) - } - - err, cleanup := setupInaccesibleDir(func(dir string) error { - dstdir = filepath.Join(dir, "dst") - return nil - }) - - defer cleanup() - - if err != nil { - t.Fatal(err) - } - - if err = CopyDir(srcdir, dstdir); err == nil { - t.Fatalf("expected error for CopyDir(%s, %s), got none", srcdir, dstdir) - } -} - -func TestCopyDirFailOpen(t *testing.T) { - if runtime.GOOS == "windows" { - // XXX: setting permissions works differently in - // Microsoft Windows. os.Chmod(..., 0222) below is not - // enough for the file to be readonly, and os.Chmod(..., - // 0000) returns an invalid argument error. Skipping - // this this until a compatible implementation is - // provided. - t.Skip("skipping on windows") - } - - var srcdir, dstdir string - - dir, err := ioutil.TempDir("", "dep") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - - srcdir = filepath.Join(dir, "src") - if err = os.MkdirAll(srcdir, 0755); err != nil { - t.Fatal(err) - } - - srcfn := filepath.Join(srcdir, "file") - srcf, err := os.Create(srcfn) - if err != nil { - t.Fatal(err) - } - srcf.Close() - - // setup source file so that it cannot be read - if err = os.Chmod(srcfn, 0222); err != nil { - t.Fatal(err) - } - - dstdir = filepath.Join(dir, "dst") - - if err = CopyDir(srcdir, dstdir); err == nil { - t.Fatalf("expected error for CopyDir(%s, %s), got none", srcdir, dstdir) - } -} - -func TestCopyFile(t *testing.T) { - dir, err := ioutil.TempDir("", "dep") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - - srcf, err := os.Create(filepath.Join(dir, "srcfile")) - if err != nil { - t.Fatal(err) - } - - want := "hello world" - if _, err := srcf.Write([]byte(want)); err != nil { - t.Fatal(err) - } - srcf.Close() - - destf := filepath.Join(dir, "destf") - if err := CopyFile(srcf.Name(), destf); err != nil { - t.Fatal(err) - } - - got, err := ioutil.ReadFile(destf) - if err != nil { - t.Fatal(err) - } - - if want != string(got) { - t.Fatalf("expected: %s, got: %s", want, string(got)) - } - - wantinfo, err := os.Stat(srcf.Name()) - if err != nil { - t.Fatal(err) - } - - gotinfo, err := os.Stat(destf) - if err != nil { - t.Fatal(err) - } - - if wantinfo.Mode() != gotinfo.Mode() { - t.Fatalf("expected %s: %#v\n to be the same mode as %s: %#v", srcf.Name(), wantinfo.Mode(), destf, gotinfo.Mode()) - } -} - -func TestCopyFileFail(t *testing.T) { - if runtime.GOOS == "windows" { - // XXX: setting permissions works differently in - // Microsoft Windows. Skipping this this until a - // compatible implementation is provided. 
- t.Skip("skipping on windows") - } - - dir, err := ioutil.TempDir("", "dep") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - - srcf, err := os.Create(filepath.Join(dir, "srcfile")) - if err != nil { - t.Fatal(err) - } - srcf.Close() - - var dstdir string - - err, cleanup := setupInaccesibleDir(func(dir string) error { - dstdir = filepath.Join(dir, "dir") - return os.Mkdir(dstdir, 0777) - }) - - defer cleanup() - - if err != nil { - t.Fatal(err) - } - - fn := filepath.Join(dstdir, "file") - if err := CopyFile(srcf.Name(), fn); err == nil { - t.Fatalf("expected error for %s, got none", fn) - } -} - -// setupInaccesibleDir creates a temporary location with a single -// directory in it, in such a way that that directory is not accessible -// after this function returns. -// -// The provided operation op is called with the directory as argument, -// so that it can create files or other test artifacts. -// -// This function returns a nil error on success, and a cleanup function -// that removes all the temporary files this function creates. It is -// the caller's responsability to call this function before the test is -// done running, whether there's an error or not. -func setupInaccesibleDir(op func(dir string) error) (err error, cleanup func()) { - cleanup = func() {} - - dir, err := ioutil.TempDir("", "dep") - if err != nil { - return err, cleanup - } - - subdir := filepath.Join(dir, "dir") - - cleanup = func() { - os.Chmod(subdir, 0777) - os.RemoveAll(dir) - } - - if err = os.Mkdir(subdir, 0777); err != nil { - return err, cleanup - } - - if err = op(subdir); err != nil { - return err, cleanup - } - - if err = os.Chmod(subdir, 0666); err != nil { - return err, cleanup - } - - return err, cleanup -} - -func TestIsRegular(t *testing.T) { - wd, err := os.Getwd() - if err != nil { - t.Fatal(err) - } - - var fn string - - err, cleanup := setupInaccesibleDir(func(dir string) error { - fn = filepath.Join(dir, "file") - fh, err := os.Create(fn) - if err != nil { - return err - } - - return fh.Close() - }) - - defer cleanup() - - if err != nil { - t.Fatal(err) - } - - tests := map[string]struct { - exists bool - err bool - }{ - wd: {false, true}, - filepath.Join(wd, "testdata"): {false, true}, - filepath.Join(wd, "cmd", "dep", "main.go"): {true, false}, - filepath.Join(wd, "this_file_does_not_exist.thing"): {false, false}, - fn: {false, true}, - } - - if runtime.GOOS == "windows" { - // This test doesn't work on Microsoft Windows because - // of the differences in how file permissions are - // implemented. For this to work, the directory where - // the file exists should be inaccessible. 
- delete(tests, fn) - } - - for f, want := range tests { - got, err := IsRegular(f) - if err != nil { - if want.exists != got { - t.Fatalf("expected %t for %s, got %t", want.exists, f, got) - } - if !want.err { - t.Fatalf("expected no error, got %v", err) - } - } else { - if want.err { - t.Fatalf("expected error for %s, got none", f) - } - } - - if got != want.exists { - t.Fatalf("expected %t for %s, got %t", want, f, got) - } - } - -} - -func TestIsDir(t *testing.T) { - wd, err := os.Getwd() - if err != nil { - t.Fatal(err) - } - - var dn string - - err, cleanup := setupInaccesibleDir(func(dir string) error { - dn = filepath.Join(dir, "dir") - return os.Mkdir(dn, 0777) - }) - - defer cleanup() - - if err != nil { - t.Fatal(err) - } - - tests := map[string]struct { - exists bool - err bool - }{ - wd: {true, false}, - filepath.Join(wd, "testdata"): {true, false}, - filepath.Join(wd, "main.go"): {false, false}, - filepath.Join(wd, "this_file_does_not_exist.thing"): {false, true}, - dn: {false, true}, - } - - if runtime.GOOS == "windows" { - // This test doesn't work on Microsoft Windows because - // of the differences in how file permissions are - // implemented. For this to work, the directory where - // the directory exists should be inaccessible. - delete(tests, dn) - } - - for f, want := range tests { - got, err := IsDir(f) - if err != nil { - if want.exists != got { - t.Fatalf("expected %t for %s, got %t", want.exists, f, got) - } - if !want.err { - t.Fatalf("expected no error, got %v", err) - } - } - - if got != want.exists { - t.Fatalf("expected %t for %s, got %t", want.exists, f, got) - } - } - -} - -func TestIsEmpty(t *testing.T) { - wd, err := os.Getwd() - if err != nil { - t.Fatal(err) - } - - h := test.NewHelper(t) - defer h.Cleanup() - - h.TempDir("empty") - tests := map[string]string{ - wd: "true", - "testdata": "true", - filepath.Join(wd, "fs.go"): "err", - filepath.Join(wd, "this_file_does_not_exist.thing"): "false", - h.Path("empty"): "false", - } - - for f, want := range tests { - empty, err := IsNonEmptyDir(f) - if want == "err" { - if err == nil { - t.Fatalf("Wanted an error for %v, but it was nil", f) - } - if empty { - t.Fatalf("Wanted false with error for %v, but got true", f) - } - } else if err != nil { - t.Fatalf("Wanted no error for %v, got %v", f, err) - } - - if want == "true" && !empty { - t.Fatalf("Wanted true for %v, but got false", f) - } - - if want == "false" && empty { - t.Fatalf("Wanted false for %v, but got true", f) - } - } -} diff --git a/hack/validate-vendor.bash b/hack/validate-vendor.bash index 770e44563a..c289984532 100755 --- a/hack/validate-vendor.bash +++ b/hack/validate-vendor.bash @@ -31,6 +31,14 @@ files=( $(validate_diff --diff-filter=ACMR --name-only -- 'Gopkg.toml' 'Gopkg.lo unset IFS if [ ${#files[@]} -gt 0 ]; then + # This will delete the memo section from Gopkg.lock + # See https://github.com/golang/dep/issues/645 for more info + # This should go away once the -vendor-only flag is implemented + # sed -i is not used because it behaves differently on macOS and Linux + TMP_FILE=`mktemp /tmp/Gopkg.lock.XXXXXXXXXX` + sed '/memo = \S*/d' Gopkg.lock > $TMP_FILE + mv $TMP_FILE Gopkg.lock + # We run ensure and see if we have a diff afterwards go build ./cmd/dep ./dep ensure diff --git a/internal/feedback/feedback.go b/internal/feedback/feedback.go index 66b4f1550a..17462097ac 100644 --- a/internal/feedback/feedback.go +++ b/internal/feedback/feedback.go @@ -6,38 +6,43 @@ package feedback import ( "fmt" - - "github.com/golang/dep" + "log" ) //
Constraint types const ConsTypeConstraint = "constraint" const ConsTypeHint = "hint" -// Dependency types +// DepTypeDirect represents a direct dependency const DepTypeDirect = "direct dep" + +// DepTypeTransitive represents a transitive dependency, +// or a dependency of a dependency const DepTypeTransitive = "transitive dep" +// DepTypeImported represents a dependency imported by an external tool +const DepTypeImported = "imported dep" + // ConstraintFeedback holds project constraint feedback data type ConstraintFeedback struct { Version, LockedVersion, Revision, ConstraintType, DependencyType, ProjectPath string } // LogFeedback logs the feedback -func (cf ConstraintFeedback) LogFeedback(ctx *dep.Ctx) { +func (cf ConstraintFeedback) LogFeedback(logger *log.Logger) { // "Using" feedback for direct dep - if cf.DependencyType == DepTypeDirect { + if cf.DependencyType == DepTypeDirect || cf.DependencyType == DepTypeImported { ver := cf.Version // revision as version for hint if cf.ConstraintType == ConsTypeHint { ver = cf.Revision } - ctx.Loggers.Err.Printf(" %v", GetUsingFeedback(ver, cf.ConstraintType, cf.DependencyType, cf.ProjectPath)) + logger.Printf(" %v", GetUsingFeedback(ver, cf.ConstraintType, cf.DependencyType, cf.ProjectPath)) } // No "Locking" feedback for hints. "Locking" feedback only for constraint // and transitive dep if cf.ConstraintType != ConsTypeHint { - ctx.Loggers.Err.Printf(" %v", GetLockingFeedback(cf.LockedVersion, cf.Revision, cf.DependencyType, cf.ProjectPath)) + logger.Printf(" %v", GetLockingFeedback(cf.LockedVersion, cf.Revision, cf.DependencyType, cf.ProjectPath)) } } @@ -46,6 +51,9 @@ func (cf ConstraintFeedback) LogFeedback(ctx *dep.Ctx) { // Using ^1.0.0 as constraint for direct dep github.com/foo/bar // Using 1b8edb3 as hint for direct dep github.com/bar/baz func GetUsingFeedback(version, consType, depType, projectPath string) string { + if depType == DepTypeImported { + return fmt.Sprintf("Using %s as initial %s for %s %s", version, consType, depType, projectPath) + } return fmt.Sprintf("Using %s as %s for %s %s", version, consType, depType, projectPath) } @@ -54,5 +62,8 @@ func GetUsingFeedback(version, consType, depType, projectPath string) string { // Locking in v1.1.4 (bc29b4f) for direct dep github.com/foo/bar // Locking in master (436f39d) for transitive dep github.com/baz/qux func GetLockingFeedback(version, revision, depType, projectPath string) string { + if depType == DepTypeImported { + return fmt.Sprintf("Trying %s (%s) as initial lock for %s %s", version, revision, depType, projectPath) + } return fmt.Sprintf("Locking in %s (%s) for %s %s", version, revision, depType, projectPath) } diff --git a/internal/feedback/feedback_test.go b/internal/feedback/feedback_test.go index 1914346175..1986cfc459 100644 --- a/internal/feedback/feedback_test.go +++ b/internal/feedback/feedback_test.go @@ -17,14 +17,26 @@ func TestGetConstraintString(t *testing.T) { feedback: GetUsingFeedback("^1.0.0", ConsTypeConstraint, DepTypeDirect, "github.com/foo/bar"), want: "Using ^1.0.0 as constraint for direct dep github.com/foo/bar", }, + { + feedback: GetUsingFeedback("^1.0.0", ConsTypeConstraint, DepTypeImported, "github.com/foo/bar"), + want: "Using ^1.0.0 as initial constraint for imported dep github.com/foo/bar", + }, { feedback: GetUsingFeedback("1b8edb3", ConsTypeHint, DepTypeDirect, "github.com/bar/baz"), want: "Using 1b8edb3 as hint for direct dep github.com/bar/baz", }, + { + feedback: GetUsingFeedback("1b8edb3", ConsTypeHint, DepTypeImported, 
"github.com/bar/baz"), + want: "Using 1b8edb3 as initial hint for imported dep github.com/bar/baz", + }, { feedback: GetLockingFeedback("v1.1.4", "bc29b4f", DepTypeDirect, "github.com/foo/bar"), want: "Locking in v1.1.4 (bc29b4f) for direct dep github.com/foo/bar", }, + { + feedback: GetLockingFeedback("v1.1.4", "bc29b4f", DepTypeImported, "github.com/foo/bar"), + want: "Trying v1.1.4 (bc29b4f) as initial lock for imported dep github.com/foo/bar", + }, { feedback: GetLockingFeedback("master", "436f39d", DepTypeTransitive, "github.com/baz/qux"), want: "Locking in master (436f39d) for transitive dep github.com/baz/qux", @@ -33,7 +45,7 @@ func TestGetConstraintString(t *testing.T) { for _, c := range cases { if c.want != c.feedback { - t.Fatalf("Feedbacks are not expected: \n\t(GOT) %v\n\t(WNT) %v", c.feedback, c.want) + t.Errorf("Feedbacks are not expected: \n\t(GOT) %v\n\t(WNT) %v", c.feedback, c.want) } } } diff --git a/internal/fs.go b/internal/fs.go deleted file mode 100644 index f47f2d3473..0000000000 --- a/internal/fs.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package internal - -import ( - "os" - "path/filepath" - "strings" - "unicode" - - "github.com/pkg/errors" -) - -func IsDir(name string) (bool, error) { - // TODO: lstat? - fi, err := os.Stat(name) - if os.IsNotExist(err) { - return false, nil - } - if err != nil { - return false, err - } - if !fi.IsDir() { - return false, errors.Errorf("%q is not a directory", name) - } - return true, nil -} - -// HasFilepathPrefix will determine if "path" starts with "prefix" from -// the point of view of a filesystem. -// -// Unlike filepath.HasPrefix, this function is path-aware, meaning that -// it knows that two directories /foo and /foobar are not the same -// thing, and therefore HasFilepathPrefix("/foobar", "/foo") will return -// false. -// -// This function also handles the case where the involved filesystems -// are case-insensitive, meaning /foo/bar and /Foo/Bar correspond to the -// same file. In that situation HasFilepathPrefix("/Foo/Bar", "/foo") -// will return true. The implementation is *not* OS-specific, so a FAT32 -// filesystem mounted on Linux will be handled correctly. -func HasFilepathPrefix(path, prefix string) bool { - if filepath.VolumeName(path) != filepath.VolumeName(prefix) { - return false - } - - var dn string - - if isDir, err := IsDir(path); err != nil { - return false - } else if isDir { - dn = path - } else { - dn = filepath.Dir(path) - } - - dn = strings.TrimSuffix(dn, string(os.PathSeparator)) - prefix = strings.TrimSuffix(prefix, string(os.PathSeparator)) - - dirs := strings.Split(dn, string(os.PathSeparator))[1:] - prefixes := strings.Split(prefix, string(os.PathSeparator))[1:] - - if len(prefixes) > len(dirs) { - return false - } - - var d, p string - - for i := range prefixes { - // need to test each component of the path for - // case-sensitiveness because on Unix we could have - // something like ext4 filesystem mounted on FAT - // mountpoint, mounted on ext4 filesystem, i.e. the - // problematic filesystem is not the last one. 
- if isCaseSensitiveFilesystem(filepath.Join(d, dirs[i])) { - d = filepath.Join(d, dirs[i]) - p = filepath.Join(p, prefixes[i]) - } else { - d = filepath.Join(d, strings.ToLower(dirs[i])) - p = filepath.Join(p, strings.ToLower(prefixes[i])) - } - - if p != d { - return false - } - } - - return true -} - -// genTestFilename returns a string with at most one rune case-flipped. -// -// The transformation is applied only to the first rune that can be -// reversibly case-flipped, meaning: -// -// * A lowercase rune for which it's true that lower(upper(r)) == r -// * An uppercase rune for which it's true that upper(lower(r)) == r -// -// All the other runes are left intact. -func genTestFilename(str string) string { - flip := true - return strings.Map(func(r rune) rune { - if flip { - if unicode.IsLower(r) { - u := unicode.ToUpper(r) - if unicode.ToLower(u) == r { - r = u - flip = false - } - } else if unicode.IsUpper(r) { - l := unicode.ToLower(r) - if unicode.ToUpper(l) == r { - r = l - flip = false - } - } - } - return r - }, str) -} - -// isCaseSensitiveFilesystem determines if the filesystem where dir -// exists is case sensitive or not. -// -// CAVEAT: this function works by taking the last component of the given -// path and flipping the case of the first letter for which case -// flipping is a reversible operation (/foo/Bar → /foo/bar), then -// testing for the existence of the new filename. There are two -// possibilities: -// -// 1. The alternate filename does not exist. We can conclude that the -// filesystem is case sensitive. -// -// 2. The filename happens to exist. We have to test if the two files -// are the same file (case insensitive file system) or different ones -// (case sensitive filesystem). -// -// If the input directory is such that the last component is composed -// exclusively of case-less codepoints (e.g. numbers), this function will -// return false. -func isCaseSensitiveFilesystem(dir string) bool { - alt := filepath.Join(filepath.Dir(dir), - genTestFilename(filepath.Base(dir))) - - dInfo, err := os.Stat(dir) - if err != nil { - return true - } - - aInfo, err := os.Stat(alt) - if err != nil { - return true - } - - return !os.SameFile(dInfo, aInfo) -} diff --git a/internal/fs/fs.go b/internal/fs/fs.go new file mode 100644 index 0000000000..cd526f6401 --- /dev/null +++ b/internal/fs/fs.go @@ -0,0 +1,399 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fs + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "unicode" + + "github.com/pkg/errors" +) + +// HasFilepathPrefix will determine if "path" starts with "prefix" from +// the point of view of a filesystem. +// +// Unlike filepath.HasPrefix, this function is path-aware, meaning that +// it knows that two directories /foo and /foobar are not the same +// thing, and therefore HasFilepathPrefix("/foobar", "/foo") will return +// false. +// +// This function also handles the case where the involved filesystems +// are case-insensitive, meaning /foo/bar and /Foo/Bar correspond to the +// same file. In that situation HasFilepathPrefix("/Foo/Bar", "/foo") +// will return true. The implementation is *not* OS-specific, so a FAT32 +// filesystem mounted on Linux will be handled correctly. +func HasFilepathPrefix(path, prefix string) bool { + // this function is more convoluted than ideal due to the need for special + // handling of volume name/drive letter on Windows.
vnPath and vnPrefix + are compared first, and then used to initialize the starting values of p and + d, which will be appended to for incremental checks using + isCaseSensitiveFilesystem and then equality. + + // no need to check isCaseSensitiveFilesystem because VolumeName returns an + // empty string on all non-Windows machines + vnPath := strings.ToLower(filepath.VolumeName(path)) + vnPrefix := strings.ToLower(filepath.VolumeName(prefix)) + if vnPath != vnPrefix { + return false + } + + // because filepath.Join("c:","dir") returns "c:dir", we have to manually add a path separator to drive letters + if strings.HasSuffix(vnPath, ":") { + vnPath += string(os.PathSeparator) + } + if strings.HasSuffix(vnPrefix, ":") { + vnPrefix += string(os.PathSeparator) + } + + var dn string + + if isDir, err := IsDir(path); err != nil { + return false + } else if isDir { + dn = path + } else { + dn = filepath.Dir(path) + } + + dn = strings.TrimSuffix(dn, string(os.PathSeparator)) + prefix = strings.TrimSuffix(prefix, string(os.PathSeparator)) + + // [1:] in the lines below eliminates the empty string on *nix and the volume name on Windows + dirs := strings.Split(dn, string(os.PathSeparator))[1:] + prefixes := strings.Split(prefix, string(os.PathSeparator))[1:] + + if len(prefixes) > len(dirs) { + return false + } + + // d, p are initialized with "" on *nix and the volume name on Windows + d := vnPath + p := vnPrefix + + for i := range prefixes { + // need to test each component of the path for + // case-sensitiveness because on Unix we could have + // something like ext4 filesystem mounted on FAT + // mountpoint, mounted on ext4 filesystem, i.e. the + // problematic filesystem is not the last one. + if isCaseSensitiveFilesystem(filepath.Join(d, dirs[i])) { + d = filepath.Join(d, dirs[i]) + p = filepath.Join(p, prefixes[i]) + } else { + d = filepath.Join(d, strings.ToLower(dirs[i])) + p = filepath.Join(p, strings.ToLower(prefixes[i])) + } + + if p != d { + return false + } + } + + return true +} + +// RenameWithFallback attempts to rename a file or directory, but falls back to +// copying in the event of a cross-device link error. If the fallback copy +// succeeds, src is still removed, emulating normal rename behavior. +func RenameWithFallback(src, dst string) error { + _, err := os.Stat(src) + if err != nil { + return errors.Wrapf(err, "cannot stat %s", src) + } + + err = rename(src, dst) + if err == nil { + return nil + } + + return renameFallback(err, src, dst) +} + +// renameByCopy attempts to rename a file or directory by copying it to the +// destination and then removing the src, thus emulating the rename behavior. +func renameByCopy(src, dst string) error { + var cerr error + if dir, _ := IsDir(src); dir { + cerr = CopyDir(src, dst) + if cerr != nil { + cerr = errors.Wrap(cerr, "copying directory failed") + } + } else { + cerr = copyFile(src, dst) + if cerr != nil { + cerr = errors.Wrap(cerr, "copying file failed") + } + } + + if cerr != nil { + return errors.Wrapf(cerr, "rename fallback failed: cannot rename %s to %s", src, dst) + } + + return errors.Wrapf(os.RemoveAll(src), "cannot delete %s", src) +} + +// isCaseSensitiveFilesystem determines if the filesystem where dir +// exists is case sensitive or not. +// +// CAVEAT: this function works by taking the last component of the given +// path and flipping the case of the first letter for which case +// flipping is a reversible operation (/foo/Bar → /foo/bar), then +// testing for the existence of the new filename. There are two +// possibilities: +// +// 1.
The alternate filename does not exist. We can conclude that the +// filesystem is case sensitive. +// +// 2. The filename happens to exist. We have to test if the two files +// are the same file (case insensitive file system) or different ones +// (case sensitive filesystem). +// +// If the input directory is such that the last component is composed +// exclusively of case-less codepoints (e.g. numbers), this function will +// return false. +func isCaseSensitiveFilesystem(dir string) bool { + alt := filepath.Join(filepath.Dir(dir), + genTestFilename(filepath.Base(dir))) + + dInfo, err := os.Stat(dir) + if err != nil { + return true + } + + aInfo, err := os.Stat(alt) + if err != nil { + return true + } + + return !os.SameFile(dInfo, aInfo) +} + +// genTestFilename returns a string with at most one rune case-flipped. +// +// The transformation is applied only to the first rune that can be +// reversibly case-flipped, meaning: +// +// * A lowercase rune for which it's true that lower(upper(r)) == r +// * An uppercase rune for which it's true that upper(lower(r)) == r +// +// All the other runes are left intact. +func genTestFilename(str string) string { + flip := true + return strings.Map(func(r rune) rune { + if flip { + if unicode.IsLower(r) { + u := unicode.ToUpper(r) + if unicode.ToLower(u) == r { + r = u + flip = false + } + } else if unicode.IsUpper(r) { + l := unicode.ToLower(r) + if unicode.ToUpper(l) == r { + r = l + flip = false + } + } + } + return r + }, str) +} + +var ( + errSrcNotDir = errors.New("source is not a directory") + errDstExist = errors.New("destination already exists") +) + +// CopyDir recursively copies a directory tree, attempting to preserve permissions. +// Source directory must exist, destination directory must *not* exist. +func CopyDir(src, dst string) error { + src = filepath.Clean(src) + dst = filepath.Clean(dst) + + // We use os.Lstat() here to ensure we don't fall into a loop where a symlink + // actually links to one of its parent directories. + fi, err := os.Lstat(src) + if err != nil { + return err + } + if !fi.IsDir() { + return errSrcNotDir + } + + _, err = os.Stat(dst) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + return errDstExist + } + + if err = os.MkdirAll(dst, fi.Mode()); err != nil { + return errors.Wrapf(err, "cannot mkdir %s", dst) + } + + entries, err := ioutil.ReadDir(src) + if err != nil { + return errors.Wrapf(err, "cannot read directory %s", src) + } + + for _, entry := range entries { + srcPath := filepath.Join(src, entry.Name()) + dstPath := filepath.Join(dst, entry.Name()) + + if entry.IsDir() { + if err = CopyDir(srcPath, dstPath); err != nil { + return errors.Wrap(err, "copying directory failed") + } + } else { + // This will include symlinks, which is what we want when + // copying things. + if err = copyFile(srcPath, dstPath); err != nil { + return errors.Wrap(err, "copying file failed") + } + } + } + + return nil +} + +// copyFile copies the contents of the file named src to the file named +// by dst. The file will be created if it does not already exist. If the +// destination file exists, all its contents will be replaced by the contents +// of the source file. The file mode will be copied from the source and +// the copied data is synced/flushed to stable storage.
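Putting the new exported helpers together: a minimal usage sketch, as called from inside the dep tree (internal/... packages are not importable from outside the repository); the paths below are placeholders for illustration, not part of this diff:

package main // placed anywhere under github.com/golang/dep, since internal/fs is internal

import (
	"fmt"
	"log"

	"github.com/golang/dep/internal/fs"
)

func main() {
	// Path-aware prefix check: /foobar is not underneath /foo, even though
	// a naive string-prefix test would say it is.
	fmt.Println(fs.HasFilepathPrefix("/foobar", "/foo")) // false

	// Rename that falls back to copy+delete when the OS reports a
	// cross-device link error (e.g. a move across mount points).
	if err := fs.RenameWithFallback("/tmp/dep-src", "/tmp/dep-dst"); err != nil {
		log.Fatal(err)
	}
}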
+func copyFile(src, dst string) (err error) { + if sym, err := IsSymlink(src); err != nil { + return err + } else if sym { + err := copySymlink(src, dst) + return err + } + + in, err := os.Open(src) + if err != nil { + return + } + defer in.Close() + + out, err := os.Create(dst) + if err != nil { + return + } + defer func() { + if e := out.Close(); e != nil { + err = e + } + }() + + _, err = io.Copy(out, in) + if err != nil { + return + } + + err = out.Sync() + if err != nil { + return + } + + si, err := os.Stat(src) + if err != nil { + return + } + err = os.Chmod(dst, si.Mode()) + if err != nil { + return + } + + return +} + +// copySymlink will resolve the src symlink and create a new symlink in dst. +// If src is a relative symlink, dst will also be a relative symlink. +func copySymlink(src, dst string) error { + resolved, err := os.Readlink(src) + if err != nil { + return errors.Wrap(err, "failed to resolve symlink") + } + + err = os.Symlink(resolved, dst) + if err != nil { + return errors.Wrapf(err, "failed to create symlink %s to %s", src, resolved) + } + + return nil +} + +// IsDir determines if the path given is a directory or not. +func IsDir(name string) (bool, error) { + // TODO: lstat? + fi, err := os.Stat(name) + if os.IsNotExist(err) { + return false, nil + } + if err != nil { + return false, err + } + if !fi.IsDir() { + return false, errors.Errorf("%q is not a directory", name) + } + return true, nil +} + +// IsNonEmptyDir determines if the path given is a non-empty directory or not. +func IsNonEmptyDir(name string) (bool, error) { + isDir, err := IsDir(name) + if !isDir || err != nil { + return false, err + } + + // Get file descriptor + f, err := os.Open(name) + if err != nil { + return false, err + } + defer f.Close() + + // Query only 1 child. EOF if no children. + _, err = f.Readdirnames(1) + switch err { + case io.EOF: + return false, nil + case nil: + return true, nil + default: + return false, err + } +} + +// IsRegular determines if the path given is a regular file or not. +func IsRegular(name string) (bool, error) { + // TODO: lstat? + fi, err := os.Stat(name) + if os.IsNotExist(err) { + return false, nil + } + if err != nil { + return false, err + } + if fi.IsDir() { + return false, errors.Errorf("%q is a directory, should be a file", name) + } + return true, nil +} + +// IsSymlink determines if the given path is a symbolic link. +func IsSymlink(path string) (bool, error) { + l, err := os.Lstat(path) + if err != nil { + return false, err + } + + return l.Mode()&os.ModeSymlink == os.ModeSymlink, nil +} diff --git a/internal/fs/fs_test.go b/internal/fs/fs_test.go new file mode 100644 index 0000000000..1dfb478e89 --- /dev/null +++ b/internal/fs/fs_test.go @@ -0,0 +1,884 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fs + +import ( + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/golang/dep/internal/test" +) + +// This function tests HasFilepathPrefix. It should test it on both case- +// sensitive and case-insensitive situations. However, the only reliable way to test +// case-insensitive behaviour is when using a case-insensitive filesystem. This +// cannot be guaranteed in an automated test. Therefore, the behaviour of the +// tests is not to test case sensitivity on *nix and to assume that Windows is +// case-insensitive. Please see the link below for some background.
+// +// https://superuser.com/questions/266110/how-do-you-make-windows-7-fully-case-sensitive-with-respect-to-the-filesystem +// +// NOTE: NTFS can be made case-sensitive. However, many Windows programs, +// including Windows Explorer, do not gracefully handle multiple files that +// differ only in capitalization. It is possible that this can cause these tests +// to fail on some setups. +func TestHasFilepathPrefix(t *testing.T) { + dir, err := ioutil.TempDir("", "dep") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + // dir2 is the same as dir but with different capitalization on Windows to + // test case insensitivity + var dir2 string + if runtime.GOOS == "windows" { + dir = strings.ToLower(dir) + dir2 = strings.ToUpper(dir) + } else { + dir2 = dir + } + + cases := []struct { + path string + prefix string + want bool + }{ + {filepath.Join(dir, "a", "b"), filepath.Join(dir2), true}, + {filepath.Join(dir, "a", "b"), filepath.Join(dir2, "a"), true}, + {filepath.Join(dir, "a", "b"), filepath.Join(dir2, "a", "b"), true}, + {filepath.Join(dir, "a", "b"), filepath.Join(dir2, "c"), false}, + {filepath.Join(dir, "a", "b"), filepath.Join(dir2, "a", "d", "b"), false}, + {filepath.Join(dir, "a", "b"), filepath.Join(dir2, "a", "b2"), false}, + {filepath.Join(dir), filepath.Join(dir2, "a", "b"), false}, + {filepath.Join(dir, "ab"), filepath.Join(dir2, "a", "b"), false}, + {filepath.Join(dir, "ab"), filepath.Join(dir2, "a"), false}, + {filepath.Join(dir, "123"), filepath.Join(dir2, "123"), true}, + {filepath.Join(dir, "123"), filepath.Join(dir2, "1"), false}, + {filepath.Join(dir, "⌘"), filepath.Join(dir2, "⌘"), true}, + {filepath.Join(dir, "a"), filepath.Join(dir2, "⌘"), false}, + {filepath.Join(dir, "⌘"), filepath.Join(dir2, "a"), false}, + } + + for _, c := range cases { + if err := os.MkdirAll(c.path, 0755); err != nil { + t.Fatal(err) + } + + if err = os.MkdirAll(c.prefix, 0755); err != nil { + t.Fatal(err) + } + + if got := HasFilepathPrefix(c.path, c.prefix); c.want != got { + t.Fatalf("dir: %q, prefix: %q, expected: %v, got: %v", c.path, c.prefix, c.want, got) + } + } +} + +// This function tests HasFilepathPrefix. It should test it on both case- +// sensitive and case-insensitive situations. However, the only reliable way to test +// case-insensitive behaviour is when using a case-insensitive filesystem. This +// cannot be guaranteed in an automated test. Therefore, the behaviour of the +// tests is not to test case sensitivity on *nix and to assume that Windows is +// case-insensitive. Please see the link below for some background. +// +// https://superuser.com/questions/266110/how-do-you-make-windows-7-fully-case-sensitive-with-respect-to-the-filesystem +// +// NOTE: NTFS can be made case-sensitive. However, many Windows programs, +// including Windows Explorer, do not gracefully handle multiple files that +// differ only in capitalization. It is possible that this can cause these tests +// to fail on some setups.
+func TestHasFilepathPrefix_Files(t *testing.T) {
+	dir, err := ioutil.TempDir("", "dep")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(dir)
+
+	// dir2 is the same as dir but with different capitalization on Windows to
+	// test case insensitivity
+	var dir2 string
+	if runtime.GOOS == "windows" {
+		dir = strings.ToLower(dir)
+		dir2 = strings.ToUpper(dir)
+	} else {
+		dir2 = dir
+	}
+
+	existingFile := filepath.Join(dir, "exists")
+	if _, err := os.Create(existingFile); err != nil {
+		t.Fatal(err)
+	}
+
+	nonExistingFile := filepath.Join(dir, "does_not_exists")
+
+	cases := []struct {
+		path   string
+		prefix string
+		want   bool
+	}{
+		{existingFile, filepath.Join(dir2), false},
+		{nonExistingFile, filepath.Join(dir2), true},
+	}
+
+	for _, c := range cases {
+		if got := HasFilepathPrefix(c.path, c.prefix); c.want != got {
+			t.Fatalf("dir: %q, prefix: %q, expected: %v, got: %v", c.path, c.prefix, c.want, got)
+		}
+	}
+}
+
+func TestRenameWithFallback(t *testing.T) {
+	dir, err := ioutil.TempDir("", "dep")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(dir)
+
+	if err = RenameWithFallback(filepath.Join(dir, "does_not_exists"), filepath.Join(dir, "dst")); err == nil {
+		t.Fatal("expected an error for a non-existent file, but got nil")
+	}
+
+	srcpath := filepath.Join(dir, "src")
+
+	if srcf, err := os.Create(srcpath); err != nil {
+		t.Fatal(err)
+	} else {
+		srcf.Close()
+	}
+
+	if err = RenameWithFallback(srcpath, filepath.Join(dir, "dst")); err != nil {
+		t.Fatal(err)
+	}
+
+	srcpath = filepath.Join(dir, "a")
+	if err = os.MkdirAll(srcpath, 0777); err != nil {
+		t.Fatal(err)
+	}
+
+	dstpath := filepath.Join(dir, "b")
+	if err = os.MkdirAll(dstpath, 0777); err != nil {
+		t.Fatal(err)
+	}
+
+	if err = RenameWithFallback(srcpath, dstpath); err == nil {
+		t.Fatal("expected an error if dst is an existing directory, but got nil")
+	}
+}
+
+func TestGenTestFilename(t *testing.T) {
+	cases := []struct {
+		str  string
+		want string
+	}{
+		{"abc", "Abc"},
+		{"ABC", "aBC"},
+		{"AbC", "abC"},
+		{"αβγ", "Αβγ"},
+		{"123", "123"},
+		{"1a2", "1A2"},
+		{"12a", "12A"},
+		{"⌘", "⌘"},
+	}
+
+	for _, c := range cases {
+		got := genTestFilename(c.str)
+		if c.want != got {
+			t.Fatalf("str: %q, expected: %q, got: %q", c.str, c.want, got)
+		}
+	}
+}
+
+func BenchmarkGenTestFilename(b *testing.B) {
+	cases := []string{
+		"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+		"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",
+		"αααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααα",
+		"11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111",
+		"⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘",
+	}
+
+	for i := 0; i < b.N; i++ {
+		for _, str := range cases {
+			genTestFilename(str)
+		}
+	}
+}
+
+func TestCopyDir(t *testing.T) {
+	dir, err := ioutil.TempDir("", "dep")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(dir)
+
+	srcdir := filepath.Join(dir, "src")
+	if err := os.MkdirAll(srcdir, 0755); err != nil {
+		t.Fatal(err)
+	}
+
+	files := []struct {
+		path     string
+		contents string
+		fi       os.FileInfo
+	}{
+		{path: "myfile", contents: "hello world"},
+		{path: filepath.Join("subdir", "file"), contents: "subdir file"},
+	}
+
+	// Create structure indicated in 'files'
+	for i, file := range files {
+		fn := filepath.Join(srcdir, file.path)
+		dn := filepath.Dir(fn)
+		if err = os.MkdirAll(dn, 0755); err != nil {
+			t.Fatal(err)
+		}
+
+		fh, err := os.Create(fn)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if _, err = fh.Write([]byte(file.contents)); err != nil {
+			t.Fatal(err)
+		}
+		fh.Close()
+
+		files[i].fi, err = os.Stat(fn)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	destdir := filepath.Join(dir, "dest")
+	if err := CopyDir(srcdir, destdir); err != nil {
+		t.Fatal(err)
+	}
+
+	// Compare copy against structure indicated in 'files'
+	for _, file := range files {
+		fn := filepath.Join(srcdir, file.path)
+		dn := filepath.Dir(fn)
+		dirOK, err := IsDir(dn)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if !dirOK {
+			t.Fatalf("expected %s to be a directory", dn)
+		}
+
+		got, err := ioutil.ReadFile(fn)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if file.contents != string(got) {
+			t.Fatalf("expected: %s, got: %s", file.contents, string(got))
+		}
+
+		gotinfo, err := os.Stat(fn)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if file.fi.Mode() != gotinfo.Mode() {
+			t.Fatalf("expected %s: %#v\n to be the same mode as %s: %#v",
+				file.path, file.fi.Mode(), fn, gotinfo.Mode())
+		}
+	}
+}
+
+func TestCopyDirFail_SrcInaccessible(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		// XXX: setting permissions works differently in
+		// Microsoft Windows. Skipping this until a
+		// compatible implementation is provided.
+		t.Skip("skipping on windows")
+	}
+
+	var srcdir, dstdir string
+
+	cleanup := setupInaccessibleDir(t, func(dir string) error {
+		srcdir = filepath.Join(dir, "src")
+		return os.MkdirAll(srcdir, 0755)
+	})
+	defer cleanup()
+
+	dir, err := ioutil.TempDir("", "dep")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(dir)
+
+	dstdir = filepath.Join(dir, "dst")
+	if err = CopyDir(srcdir, dstdir); err == nil {
+		t.Fatalf("expected error for CopyDir(%s, %s), got none", srcdir, dstdir)
+	}
+}
+
+func TestCopyDirFail_DstInaccessible(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		// XXX: setting permissions works differently in
+		// Microsoft Windows. Skipping this until a
+		// compatible implementation is provided.
+ t.Skip("skipping on windows") + } + + var srcdir, dstdir string + + dir, err := ioutil.TempDir("", "dep") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + srcdir = filepath.Join(dir, "src") + if err = os.MkdirAll(srcdir, 0755); err != nil { + t.Fatal(err) + } + + cleanup := setupInaccessibleDir(t, func(dir string) error { + dstdir = filepath.Join(dir, "dst") + return nil + }) + defer cleanup() + + if err := CopyDir(srcdir, dstdir); err == nil { + t.Fatalf("expected error for CopyDir(%s, %s), got none", srcdir, dstdir) + } +} + +func TestCopyDirFail_SrcIsNotDir(t *testing.T) { + var srcdir, dstdir string + + dir, err := ioutil.TempDir("", "dep") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + srcdir = filepath.Join(dir, "src") + if _, err = os.Create(srcdir); err != nil { + t.Fatal(err) + } + + dstdir = filepath.Join(dir, "dst") + + if err = CopyDir(srcdir, dstdir); err == nil { + t.Fatalf("expected error for CopyDir(%s, %s), got none", srcdir, dstdir) + } + + if err != errSrcNotDir { + t.Fatalf("expected %v error for CopyDir(%s, %s), got %s", errSrcNotDir, srcdir, dstdir, err) + } + +} + +func TestCopyDirFail_DstExists(t *testing.T) { + var srcdir, dstdir string + + dir, err := ioutil.TempDir("", "dep") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + srcdir = filepath.Join(dir, "src") + if err = os.MkdirAll(srcdir, 0755); err != nil { + t.Fatal(err) + } + + dstdir = filepath.Join(dir, "dst") + if err = os.MkdirAll(dstdir, 0755); err != nil { + t.Fatal(err) + } + + if err = CopyDir(srcdir, dstdir); err == nil { + t.Fatalf("expected error for CopyDir(%s, %s), got none", srcdir, dstdir) + } + + if err != errDstExist { + t.Fatalf("expected %v error for CopyDir(%s, %s), got %s", errDstExist, srcdir, dstdir, err) + } +} + +func TestCopyDirFailOpen(t *testing.T) { + if runtime.GOOS == "windows" { + // XXX: setting permissions works differently in + // Microsoft Windows. os.Chmod(..., 0222) below is not + // enough for the file to be readonly, and os.Chmod(..., + // 0000) returns an invalid argument error. Skipping + // this this until a compatible implementation is + // provided. 
+ t.Skip("skipping on windows") + } + + var srcdir, dstdir string + + dir, err := ioutil.TempDir("", "dep") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + srcdir = filepath.Join(dir, "src") + if err = os.MkdirAll(srcdir, 0755); err != nil { + t.Fatal(err) + } + + srcfn := filepath.Join(srcdir, "file") + srcf, err := os.Create(srcfn) + if err != nil { + t.Fatal(err) + } + srcf.Close() + + // setup source file so that it cannot be read + if err = os.Chmod(srcfn, 0222); err != nil { + t.Fatal(err) + } + + dstdir = filepath.Join(dir, "dst") + + if err = CopyDir(srcdir, dstdir); err == nil { + t.Fatalf("expected error for CopyDir(%s, %s), got none", srcdir, dstdir) + } +} + +func TestCopyFile(t *testing.T) { + dir, err := ioutil.TempDir("", "dep") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + srcf, err := os.Create(filepath.Join(dir, "srcfile")) + if err != nil { + t.Fatal(err) + } + + want := "hello world" + if _, err := srcf.Write([]byte(want)); err != nil { + t.Fatal(err) + } + srcf.Close() + + destf := filepath.Join(dir, "destf") + if err := copyFile(srcf.Name(), destf); err != nil { + t.Fatal(err) + } + + got, err := ioutil.ReadFile(destf) + if err != nil { + t.Fatal(err) + } + + if want != string(got) { + t.Fatalf("expected: %s, got: %s", want, string(got)) + } + + wantinfo, err := os.Stat(srcf.Name()) + if err != nil { + t.Fatal(err) + } + + gotinfo, err := os.Stat(destf) + if err != nil { + t.Fatal(err) + } + + if wantinfo.Mode() != gotinfo.Mode() { + t.Fatalf("expected %s: %#v\n to be the same mode as %s: %#v", srcf.Name(), wantinfo.Mode(), destf, gotinfo.Mode()) + } +} + +func TestCopyFileSymlink(t *testing.T) { + dir, err := ioutil.TempDir("", "dep") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + srcPath := filepath.Join(dir, "src") + symlinkPath := filepath.Join(dir, "symlink") + dstPath := filepath.Join(dir, "dst") + + srcf, err := os.Create(srcPath) + if err != nil { + t.Fatal(err) + } + srcf.Close() + + if err = os.Symlink(srcPath, symlinkPath); err != nil { + t.Fatalf("could not create symlink: %s", err) + } + + if err = copyFile(symlinkPath, dstPath); err != nil { + t.Fatalf("failed to copy symlink: %s", err) + } + + resolvedPath, err := os.Readlink(dstPath) + if err != nil { + t.Fatalf("could not resolve symlink: %s", err) + } + + if resolvedPath != srcPath { + t.Fatalf("resolved path is incorrect. expected %s, got %s", srcPath, resolvedPath) + } +} + +func TestCopyFileSymlinkToDirectory(t *testing.T) { + dir, err := ioutil.TempDir("", "dep") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + srcPath := filepath.Join(dir, "src") + symlinkPath := filepath.Join(dir, "symlink") + dstPath := filepath.Join(dir, "dst") + + err = os.MkdirAll(srcPath, 0777) + if err != nil { + t.Fatal(err) + } + + if err = os.Symlink(srcPath, symlinkPath); err != nil { + t.Fatalf("could not create symlink: %v", err) + } + + if err = copyFile(symlinkPath, dstPath); err != nil { + t.Fatalf("failed to copy symlink: %s", err) + } + + resolvedPath, err := os.Readlink(dstPath) + if err != nil { + t.Fatalf("could not resolve symlink: %s", err) + } + + if resolvedPath != srcPath { + t.Fatalf("resolved path is incorrect. expected %s, got %s", srcPath, resolvedPath) + } +} + +func TestCopyFileFail(t *testing.T) { + if runtime.GOOS == "windows" { + // XXX: setting permissions works differently in + // Microsoft Windows. Skipping this this until a + // compatible implementation is provided. 
+ t.Skip("skipping on windows") + } + + dir, err := ioutil.TempDir("", "dep") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + srcf, err := os.Create(filepath.Join(dir, "srcfile")) + if err != nil { + t.Fatal(err) + } + srcf.Close() + + var dstdir string + + cleanup := setupInaccessibleDir(t, func(dir string) error { + dstdir = filepath.Join(dir, "dir") + return os.Mkdir(dstdir, 0777) + }) + defer cleanup() + + fn := filepath.Join(dstdir, "file") + if err := copyFile(srcf.Name(), fn); err == nil { + t.Fatalf("expected error for %s, got none", fn) + } +} + +// setupInaccessibleDir creates a temporary location with a single +// directory in it, in such a way that that directory is not accessible +// after this function returns. +// +// op is called with the directory as argument, so that it can create +// files or other test artifacts. +// +// If setupInaccessibleDir fails in its preparation, or op fails, t.Fatal +// will be invoked. +// +// This function returns a cleanup function that removes all the temporary +// files this function creates. It is the caller's responsibility to call +// this function before the test is done running, whether there's an error or not. +func setupInaccessibleDir(t *testing.T, op func(dir string) error) func() { + dir, err := ioutil.TempDir("", "dep") + if err != nil { + t.Fatal(err) + return nil // keep compiler happy + } + + subdir := filepath.Join(dir, "dir") + + cleanup := func() { + if err := os.Chmod(subdir, 0777); err != nil { + t.Error(err) + } + if err := os.RemoveAll(dir); err != nil { + t.Error(err) + } + } + + if err := os.Mkdir(subdir, 0777); err != nil { + cleanup() + t.Fatal(err) + return nil + } + + if err := op(subdir); err != nil { + cleanup() + t.Fatal(err) + return nil + } + + if err := os.Chmod(subdir, 0666); err != nil { + cleanup() + t.Fatal(err) + return nil + } + + return cleanup +} + +func TestIsRegular(t *testing.T) { + wd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + + var fn string + + cleanup := setupInaccessibleDir(t, func(dir string) error { + fn = filepath.Join(dir, "file") + fh, err := os.Create(fn) + if err != nil { + return err + } + + return fh.Close() + }) + defer cleanup() + + tests := map[string]struct { + exists bool + err bool + }{ + wd: {false, true}, + filepath.Join(wd, "testdata"): {false, true}, + filepath.Join(wd, "testdata", "test.file"): {true, false}, + filepath.Join(wd, "this_file_does_not_exist.thing"): {false, false}, + fn: {false, true}, + } + + if runtime.GOOS == "windows" { + // This test doesn't work on Microsoft Windows because + // of the differences in how file permissions are + // implemented. For this to work, the directory where + // the file exists should be inaccessible. 
+		delete(tests, fn)
+	}
+
+	for f, want := range tests {
+		got, err := IsRegular(f)
+		if err != nil {
+			if want.exists != got {
+				t.Fatalf("expected %t for %s, got %t", want.exists, f, got)
+			}
+			if !want.err {
+				t.Fatalf("expected no error, got %v", err)
+			}
+		} else {
+			if want.err {
+				t.Fatalf("expected error for %s, got none", f)
+			}
+		}
+
+		if got != want.exists {
+			t.Fatalf("expected %t for %s, got %t", want.exists, f, got)
+		}
+	}
+
+}
+
+func TestIsDir(t *testing.T) {
+	wd, err := os.Getwd()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var dn string
+
+	cleanup := setupInaccessibleDir(t, func(dir string) error {
+		dn = filepath.Join(dir, "dir")
+		return os.Mkdir(dn, 0777)
+	})
+	defer cleanup()
+
+	tests := map[string]struct {
+		exists bool
+		err    bool
+	}{
+		wd: {true, false},
+		filepath.Join(wd, "testdata"):                       {true, false},
+		filepath.Join(wd, "main.go"):                        {false, false},
+		filepath.Join(wd, "this_file_does_not_exist.thing"): {false, true},
+		dn: {false, true},
+	}
+
+	if runtime.GOOS == "windows" {
+		// This test doesn't work on Microsoft Windows because
+		// of the differences in how file permissions are
+		// implemented. For this to work, the directory where
+		// the directory exists should be inaccessible.
+		delete(tests, dn)
+	}
+
+	for f, want := range tests {
+		got, err := IsDir(f)
+		if err != nil {
+			if want.exists != got {
+				t.Fatalf("expected %t for %s, got %t", want.exists, f, got)
+			}
+			if !want.err {
+				t.Fatalf("expected no error, got %v", err)
+			}
+		}
+
+		if got != want.exists {
+			t.Fatalf("expected %t for %s, got %t", want.exists, f, got)
+		}
+	}
+}
+
+func TestIsEmpty(t *testing.T) {
+	wd, err := os.Getwd()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	h := test.NewHelper(t)
+	defer h.Cleanup()
+
+	h.TempDir("empty")
+	tests := map[string]string{
+		wd:         "true",
+		"testdata": "true",
+		filepath.Join(wd, "fs.go"):                          "err",
+		filepath.Join(wd, "this_file_does_not_exist.thing"): "false",
+		h.Path("empty"):                                     "false",
+	}
+
+	for f, want := range tests {
+		empty, err := IsNonEmptyDir(f)
+		if want == "err" {
+			if err == nil {
+				t.Fatalf("Wanted an error for %v, but it was nil", f)
+			}
+			if empty {
+				t.Fatalf("Wanted false with error for %v, but got true", f)
+			}
+		} else if err != nil {
+			t.Fatalf("Wanted no error for %v, got %v", f, err)
+		}
+
+		if want == "true" && !empty {
+			t.Fatalf("Wanted true for %v, but got false", f)
+		}
+
+		if want == "false" && empty {
+			t.Fatalf("Wanted false for %v, but got true", f)
+		}
+	}
+}
+
+func TestIsSymlink(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		// XXX: creating symlinks is not supported in Go on
+		// Microsoft Windows. Skipping this until a solution
+		// for creating symlinks is provided.
+ t.Skip("skipping on windows") + } + + dir, err := ioutil.TempDir("", "dep") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + dirPath := filepath.Join(dir, "directory") + if err = os.MkdirAll(dirPath, 0777); err != nil { + t.Fatal(err) + } + + filePath := filepath.Join(dir, "file") + f, err := os.Create(filePath) + if err != nil { + t.Fatal(err) + } + f.Close() + + dirSymlink := filepath.Join(dir, "dirSymlink") + fileSymlink := filepath.Join(dir, "fileSymlink") + if err = os.Symlink(dirPath, dirSymlink); err != nil { + t.Fatal(err) + } + if err = os.Symlink(filePath, fileSymlink); err != nil { + t.Fatal(err) + } + + var ( + inaccessibleFile string + inaccessibleSymlink string + ) + + cleanup := setupInaccessibleDir(t, func(dir string) error { + inaccessibleFile = filepath.Join(dir, "file") + if fh, err := os.Create(inaccessibleFile); err != nil { + return err + } else if err = fh.Close(); err != nil { + return err + } + + inaccessibleSymlink = filepath.Join(dir, "symlink") + return os.Symlink(inaccessibleFile, inaccessibleSymlink) + }) + defer cleanup() + + tests := map[string]struct { + expected bool + err bool + }{ + dirPath: {false, false}, + filePath: {false, false}, + dirSymlink: {true, false}, + fileSymlink: {true, false}, + inaccessibleFile: {false, true}, + inaccessibleSymlink: {false, true}, + } + + for path, want := range tests { + got, err := IsSymlink(path) + if err != nil { + if !want.err { + t.Errorf("expected no error, got %v", err) + } + } + + if got != want.expected { + t.Errorf("expected %t for %s, got %t", want.expected, path, got) + } + } +} diff --git a/internal/fs/rename.go b/internal/fs/rename.go new file mode 100644 index 0000000000..3718d4575b --- /dev/null +++ b/internal/fs/rename.go @@ -0,0 +1,36 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !windows +// +build go1.8 + +package fs + +import ( + "os" + "syscall" + + "github.com/pkg/errors" +) + +func rename(src, dst string) error { + return os.Rename(src, dst) +} + +// renameFallback attempts to determine the appropriate fallback to failed rename +// operation depending on the resulting error. +func renameFallback(err error, src, dst string) error { + // Rename may fail if src and dst are on different devices; fall back to + // copy if we detect that case. syscall.EXDEV is the common name for the + // cross device link error which has varying output text across different + // operating systems. + terr, ok := err.(*os.LinkError) + if !ok { + return err + } else if terr.Err != syscall.EXDEV { + return errors.Wrapf(terr, "link error: cannot rename %s to %s", src, dst) + } + + return renameByCopy(src, dst) +} diff --git a/internal/fs/rename_go17.go b/internal/fs/rename_go17.go new file mode 100644 index 0000000000..33807b26ce --- /dev/null +++ b/internal/fs/rename_go17.go @@ -0,0 +1,50 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !windows +// +build !go1.8 + +package fs + +import ( + "os" + "syscall" + + "github.com/pkg/errors" +) + +func rename(src, dst string) error { + fi, err := os.Stat(src) + if err != nil { + return errors.Wrapf(err, "cannot stat %s", src) + } + + // In go 1.8, the behavior of os.Rename changed on non-Windows platforms. It no + // longer allows renames that would replace an existing directory. 
This has + // always been the case on Windows, though. + // + // For consistency, we replicate the go 1.8 behavior in earlier go versions here. + if dstfi, err := os.Stat(dst); fi.IsDir() && err == nil && dstfi.IsDir() { + return errors.Errorf("cannot rename directory %s to existing dst %s", src, dst) + } + + return os.Rename(src, dst) +} + +// renameFallback attempts to determine the appropriate fallback to failed rename +// operation depending on the resulting error. +func renameFallback(err error, src, dst string) error { + // Rename may fail if src and dst are on different devices; fall back to + // copy if we detect that case. syscall.EXDEV is the common name for the + // cross device link error which has varying output text across different + // operating systems. + terr, ok := err.(*os.LinkError) + if !ok { + return err + } else if terr.Err != syscall.EXDEV { + return errors.Wrapf(terr, "link error: cannot rename %s to %s", src, dst) + } + + return renameByCopy(src, dst) +} diff --git a/internal/fs/rename_windows.go b/internal/fs/rename_windows.go new file mode 100644 index 0000000000..9f1264b03e --- /dev/null +++ b/internal/fs/rename_windows.go @@ -0,0 +1,46 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package fs + +import ( + "os" + "syscall" + + "github.com/pkg/errors" +) + +func rename(src, dst string) error { + return os.Rename(src, dst) +} + +// renameFallback attempts to determine the appropriate fallback to failed rename +// operation depending on the resulting error. +func renameFallback(err error, src, dst string) error { + // Rename may fail if src and dst are on different devices; fall back to + // copy if we detect that case. syscall.EXDEV is the common name for the + // cross device link error which has varying output text across different + // operating systems. + terr, ok := err.(*os.LinkError) + if !ok { + return err + } + + if terr.Err != syscall.EXDEV { + // In windows it can drop down to an operating system call that + // returns an operating system error with a different number and + // message. Checking for that as a fall back. + noerr, ok := terr.Err.(syscall.Errno) + + // 0x11 (ERROR_NOT_SAME_DEVICE) is the windows error. + // See https://msdn.microsoft.com/en-us/library/cc231199.aspx + if ok && noerr != 0x11 { + return errors.Wrapf(terr, "link error: cannot rename %s to %s", src, dst) + } + } + + return renameByCopy(src, dst) +} diff --git a/internal/fs/testdata/test.file b/internal/fs/testdata/test.file new file mode 100644 index 0000000000..e69de29bb2 diff --git a/internal/fs_test.go b/internal/fs_test.go deleted file mode 100644 index e8391ebef9..0000000000 --- a/internal/fs_test.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
-
-package internal
-
-import (
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"testing"
-)
-
-func TestHasFilepathPrefix(t *testing.T) {
-	dir, err := ioutil.TempDir("", "dep")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(dir)
-
-	cases := []struct {
-		dir    string
-		prefix string
-		want   bool
-	}{
-		{filepath.Join(dir, "a", "b"), filepath.Join(dir), true},
-		{filepath.Join(dir, "a", "b"), filepath.Join(dir, "a"), true},
-		{filepath.Join(dir, "a", "b"), filepath.Join(dir, "a", "b"), true},
-		{filepath.Join(dir, "a", "b"), filepath.Join(dir, "c"), false},
-		{filepath.Join(dir, "a", "b"), filepath.Join(dir, "a", "d", "b"), false},
-		{filepath.Join(dir, "a", "b"), filepath.Join(dir, "a", "b2"), false},
-		{filepath.Join(dir, "ab"), filepath.Join(dir, "a", "b"), false},
-		{filepath.Join(dir, "ab"), filepath.Join(dir, "a"), false},
-		{filepath.Join(dir, "123"), filepath.Join(dir, "123"), true},
-		{filepath.Join(dir, "123"), filepath.Join(dir, "1"), false},
-		{filepath.Join(dir, "⌘"), filepath.Join(dir, "⌘"), true},
-		{filepath.Join(dir, "a"), filepath.Join(dir, "⌘"), false},
-		{filepath.Join(dir, "⌘"), filepath.Join(dir, "a"), false},
-	}
-
-	for _, c := range cases {
-		err := os.MkdirAll(c.dir, 0755)
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		err = os.MkdirAll(c.prefix, 0755)
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		got := HasFilepathPrefix(c.dir, c.prefix)
-		if c.want != got {
-			t.Fatalf("dir: %q, prefix: %q, expected: %v, got: %v", c.dir, c.prefix, c.want, got)
-		}
-	}
-}
-
-func TestGenTestFilename(t *testing.T) {
-	cases := []struct {
-		str  string
-		want string
-	}{
-		{"abc", "Abc"},
-		{"ABC", "aBC"},
-		{"AbC", "abC"},
-		{"αβγ", "Αβγ"},
-		{"123", "123"},
-		{"1a2", "1A2"},
-		{"12a", "12A"},
-		{"⌘", "⌘"},
-	}
-
-	for _, c := range cases {
-		got := genTestFilename(c.str)
-		if c.want != got {
-			t.Fatalf("str: %q, expected: %q, got: %q", c.str, c.want, got)
-		}
-	}
-}
-
-func BenchmarkGenTestFilename(b *testing.B) {
-	cases := []string{
-		"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
-		"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",
-		"αααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααααα",
-		"11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111",
-		"⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘⌘",
-	}
-
-	for i := 0; i < b.N; i++ {
-		for _, str := range cases {
-			genTestFilename(str)
-		}
-	}
-}
diff --git a/internal/gps/_testdata/badrepo/README.md b/internal/gps/_testdata/badrepo/README.md
new file mode 100644
index 0000000000..14232159c3
--- /dev/null
+++ b/internal/gps/_testdata/badrepo/README.md
@@ -0,0 +1,5 @@
+### Test Data
+
+This directory contains artifacts that represent malformed repo archives. Its purpose is to ensure `dep` can recover from such corrupted repositories in specific test scenarios.
+
+- `corrupt_dot_git_directory.tar`: a repo with a corrupt `.git` directory. Dep can leave a directory in such a malformed state when a user hits `Ctrl+C` in the middle of a `dep init` or similar operation. `TestNewCtxRepoRecovery` uses this file to ensure recovery.
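For reference, the cross-device condition that all three renameFallback variants above react to can be reproduced with the standard library alone. This is a minimal sketch, not dep's code; the two paths are hypothetical mount points assumed to live on different devices.

    package main

    import (
    	"fmt"
    	"os"
    	"syscall"
    )

    // isCrossDevice reports whether err is the EXDEV condition that makes
    // renameFallback switch from os.Rename to copy-and-remove.
    func isCrossDevice(err error) bool {
    	le, ok := err.(*os.LinkError)
    	return ok && le.Err == syscall.EXDEV
    }

    func main() {
    	// Hypothetical paths: /tmp and /mnt/disk sit on different filesystems.
    	if err := os.Rename("/tmp/src", "/mnt/disk/dst"); err != nil && isCrossDevice(err) {
    		fmt.Println("cross-device rename: falling back to copy+remove")
    	}
    }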
diff --git a/internal/gps/_testdata/badrepo/corrupt_dot_git_directory.tar b/internal/gps/_testdata/badrepo/corrupt_dot_git_directory.tar new file mode 100644 index 0000000000..7a84731839 Binary files /dev/null and b/internal/gps/_testdata/badrepo/corrupt_dot_git_directory.tar differ diff --git a/internal/gps/_testdata/src/dotgodir/.go/dot.go b/internal/gps/_testdata/src/dotgodir/.go/dot.go new file mode 100644 index 0000000000..bb51d69f9a --- /dev/null +++ b/internal/gps/_testdata/src/dotgodir/.go/dot.go @@ -0,0 +1,7 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dot + +// nothing to see here diff --git a/internal/gps/_testdata/src/disallow/.m1p/a.go b/internal/gps/_testdata/src/dotgodir/.m1p/a.go similarity index 86% rename from internal/gps/_testdata/src/disallow/.m1p/a.go rename to internal/gps/_testdata/src/dotgodir/.m1p/a.go index 6a88c12022..a5fc8c2e57 100644 --- a/internal/gps/_testdata/src/disallow/.m1p/a.go +++ b/internal/gps/_testdata/src/dotgodir/.m1p/a.go @@ -7,7 +7,7 @@ package m1p import ( "sort" - "github.com/golang/dep/gps" + "github.com/golang/dep/internal/gps" ) var ( diff --git a/internal/gps/_testdata/src/disallow/.m1p/b.go b/internal/gps/_testdata/src/dotgodir/.m1p/b.go similarity index 100% rename from internal/gps/_testdata/src/disallow/.m1p/b.go rename to internal/gps/_testdata/src/dotgodir/.m1p/b.go diff --git a/internal/gps/bridge.go b/internal/gps/bridge.go index 5e4a4a6d26..5ac75ff565 100644 --- a/internal/gps/bridge.go +++ b/internal/gps/bridge.go @@ -75,9 +75,8 @@ type bridge struct { down bool } -// Global factory func to create a bridge. This exists solely to allow tests to -// override it with a custom bridge and sm. -var mkBridge = func(s *solver, sm SourceManager, down bool) sourceBridge { +// mkBridge creates a bridge +func mkBridge(s *solver, sm SourceManager, down bool) *bridge { return &bridge{ sm: sm, s: s, diff --git a/internal/gps/constraint_test.go b/internal/gps/constraint_test.go index ab99063919..0aaf5f1709 100644 --- a/internal/gps/constraint_test.go +++ b/internal/gps/constraint_test.go @@ -674,6 +674,28 @@ func TestSemverConstraintOps(t *testing.T) { } } +func TestSemverConstraint_ImpliedCaret(t *testing.T) { + c, _ := NewSemverConstraintIC("1.0.0") + + wantS := "^1.0.0" + gotS := c.String() + if wantS != gotS { + t.Errorf("Expected string %s, got %s", wantS, gotS) + } + + wantI := "1.0.0" + gotI := c.ImpliedCaretString() + if wantI != gotI { + t.Errorf("Expected implied string %s, got %s", wantI, gotI) + } + + wantT := "svc-^1.0.0" + gotT := c.typedString() + if wantT != gotT { + t.Errorf("Expected type string %s, got %s", wantT, gotT) + } +} + // Test that certain types of cross-version comparisons work when they are // expressed as a version union (but that others don't). func TestVersionUnion(t *testing.T) { diff --git a/internal/gps/constraints.go b/internal/gps/constraints.go index cb9d4f5ae1..143c786c9a 100644 --- a/internal/gps/constraints.go +++ b/internal/gps/constraints.go @@ -25,6 +25,14 @@ var ( type Constraint interface { fmt.Stringer + // ImpliedCaretString converts the Constraint to a string in the same manner + // as String(), but treats the empty operator as equivalent to ^, rather + // than =. + // + // In the same way that String() is the inverse of NewConstraint(), this + // method is the inverse of NewSemverConstraintIC(). 
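+	//
+	// For example, as TestSemverConstraint_ImpliedCaret above demonstrates:
+	// for a constraint built by NewSemverConstraintIC("1.0.0"), String()
+	// yields "^1.0.0" while ImpliedCaretString() yields "1.0.0".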
+ ImpliedCaretString() string + // Matches indicates if the provided Version is allowed by the Constraint. Matches(Version) bool @@ -58,7 +66,27 @@ func NewSemverConstraint(body string) (Constraint, error) { } // If we got a simple semver.Version, simplify by returning our // corresponding type - if sv, ok := c.(*semver.Version); ok { + if sv, ok := c.(semver.Version); ok { + return semVersion{sv: sv}, nil + } + return semverConstraint{c: c}, nil +} + +// NewSemverConstraintIC attempts to construct a semver Constraint object from the +// input string, defaulting to a caret, ^, when no operator is specified. Put +// differently, ^ is the default operator for NewSemverConstraintIC, while = +// is the default operator for NewSemverConstraint. +// +// If the input string cannot be made into a valid semver Constraint, an error +// is returned. +func NewSemverConstraintIC(body string) (Constraint, error) { + c, err := semver.NewConstraintIC(body) + if err != nil { + return nil, err + } + // If we got a simple semver.Version, simplify by returning our + // corresponding type + if sv, ok := c.(semver.Version); ok { return semVersion{sv: sv}, nil } return semverConstraint{c: c}, nil @@ -72,6 +100,16 @@ func (c semverConstraint) String() string { return c.c.String() } +// ImpliedCaretString converts the Constraint to a string in the same manner +// as String(), but treats the empty operator as equivalent to ^, rather +// than =. +// +// In the same way that String() is the inverse of NewConstraint(), this +// method is the inverse of NewSemverConstraintIC(). +func (c semverConstraint) ImpliedCaretString() string { + return c.c.ImpliedCaretString() +} + func (c semverConstraint) typedString() string { return fmt.Sprintf("svc-%s", c.c.String()) } @@ -153,6 +191,10 @@ func (anyConstraint) String() string { return "*" } +func (anyConstraint) ImpliedCaretString() string { + return "*" +} + func (anyConstraint) typedString() string { return "any-*" } @@ -177,6 +219,10 @@ func (noneConstraint) String() string { return "" } +func (noneConstraint) ImpliedCaretString() string { + return "" +} + func (noneConstraint) typedString() string { return "none-" } diff --git a/internal/gps/deduce.go b/internal/gps/deduce.go index a5c2dd0e29..fe6a64cdb3 100644 --- a/internal/gps/deduce.go +++ b/internal/gps/deduce.go @@ -6,7 +6,6 @@ package gps import ( "context" - "errors" "fmt" "io" "net/http" @@ -18,6 +17,7 @@ import ( "sync" radix "github.com/armon/go-radix" + "github.com/pkg/errors" ) var ( @@ -27,6 +27,8 @@ var ( svnSchemes = []string{"https", "http", "svn", "svn+ssh"} ) +const gopkgUnstableSuffix = "-unstable" + func validateVCSScheme(scheme, typ string) bool { // everything allows plain ssh if scheme == "ssh" { @@ -276,10 +278,18 @@ func (m gopkginDeducer) deduceSource(p string, u *url.URL) (maybeSource, error) } else { u.Path = path.Join(v[2], v[3]) } - major, err := strconv.ParseUint(v[4][1:], 10, 64) + + unstable := false + majorStr := v[4] + + if strings.HasSuffix(majorStr, gopkgUnstableSuffix) { + unstable = true + majorStr = strings.TrimSuffix(majorStr, gopkgUnstableSuffix) + } + major, err := strconv.ParseUint(majorStr[1:], 10, 64) if err != nil { // this should only be reachable if there's an error in the regex - return nil, fmt.Errorf("could not parse %q as a gopkg.in major version", v[4][1:]) + return nil, fmt.Errorf("could not parse %q as a gopkg.in major version", majorStr[1:]) } mb := make(maybeSources, len(gitSchemes)) @@ -290,9 +300,10 @@ func (m gopkginDeducer) deduceSource(p string, u *url.URL) 
(maybeSource, error) } u2.Scheme = scheme mb[k] = maybeGopkginSource{ - opath: v[1], - url: &u2, - major: major, + opath: v[1], + url: &u2, + major: major, + unstable: unstable, } } @@ -696,6 +707,7 @@ func (hmd *httpMetadataDeducer) deduce(ctx context.Context, path string) (pathDe opath := path u, path, err := normalizeURI(path) if err != nil { + err = errors.Wrapf(err, "unable to normalize URI") hmd.deduceErr = err return } @@ -705,20 +717,25 @@ func (hmd *httpMetadataDeducer) deduce(ctx context.Context, path string) (pathDe // Make the HTTP call to attempt to retrieve go-get metadata var root, vcs, reporoot string err = hmd.suprvsr.do(ctx, path, ctHTTPMetadata, func(ctx context.Context) error { - root, vcs, reporoot, err = parseMetadata(ctx, path, u.Scheme) + root, vcs, reporoot, err = getMetadata(ctx, path, u.Scheme) + if err != nil { + err = errors.Wrapf(err, "unable to read metadata") + } return err }) if err != nil { - hmd.deduceErr = fmt.Errorf("unable to deduce repository and source type for: %q", opath) + err = errors.Wrapf(err, "unable to deduce repository and source type for %q", opath) + hmd.deduceErr = err return } pd.root = root - // If we got something back at all, then it supercedes the actual input for + // If we got something back at all, then it supersedes the actual input for // the real URL to hit repoURL, err := url.Parse(reporoot) if err != nil { - hmd.deduceErr = fmt.Errorf("server returned bad URL in go-get metadata: %q", reporoot) + err = errors.Wrapf(err, "server returned bad URL in go-get metadata, reporoot=%q", reporoot) + hmd.deduceErr = err return } @@ -731,7 +748,7 @@ func (hmd *httpMetadataDeducer) deduce(ctx context.Context, path string) (pathDe // To err on the secure side, do NOT allow the same in the other // direction (https -> http). if u.Scheme != "http" || repoURL.Scheme != "https" { - hmd.deduceErr = fmt.Errorf("scheme mismatch for %q: input asked for %q, but go-get metadata specified %q", path, u.Scheme, repoURL.Scheme) + hmd.deduceErr = errors.Errorf("scheme mismatch for %q: input asked for %q, but go-get metadata specified %q", path, u.Scheme, repoURL.Scheme) return } } @@ -744,7 +761,7 @@ func (hmd *httpMetadataDeducer) deduce(ctx context.Context, path string) (pathDe case "hg": pd.mb = maybeHgSource{url: repoURL} default: - hmd.deduceErr = fmt.Errorf("unsupported vcs type %s in go-get metadata from %s", vcs, path) + hmd.deduceErr = errors.Errorf("unsupported vcs type %s in go-get metadata from %s", vcs, path) return } @@ -781,7 +798,7 @@ func normalizeURI(p string) (u *url.URL, newpath string, err error) { } else { u, err = url.Parse(p) if err != nil { - return nil, "", fmt.Errorf("%q is not a valid URI", p) + return nil, "", errors.Errorf("%q is not a valid URI", p) } } @@ -794,7 +811,7 @@ func normalizeURI(p string) (u *url.URL, newpath string, err error) { } if !pathvld.MatchString(newpath) { - return nil, "", fmt.Errorf("%q is not a valid import path", newpath) + return nil, "", errors.Errorf("%q is not a valid import path", newpath) } return @@ -802,12 +819,6 @@ func normalizeURI(p string) (u *url.URL, newpath string, err error) { // fetchMetadata fetches the remote metadata for path. 
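To make that metadata concrete: go-get discovery issues an HTTP GET against the import path with ?go-get=1 and scans the response body for a go-import meta tag. The following is a minimal sketch of that round trip using only the standard library; the real parsing lives in parseMetaGoImports, and golang.org/x/net is simply a well-known vanity import path.

    package main

    import (
    	"fmt"
    	"io/ioutil"
    	"net/http"
    )

    func main() {
    	// The body for a vanity import path carries a tag such as:
    	//   <meta name="go-import" content="golang.org/x/net git https://go.googlesource.com/net">
    	resp, err := http.Get("https://golang.org/x/net?go-get=1")
    	if err != nil {
    		fmt.Println("fetch failed:", err)
    		return
    	}
    	defer resp.Body.Close()

    	body, err := ioutil.ReadAll(resp.Body)
    	if err != nil {
    		fmt.Println("read failed:", err)
    		return
    	}
    	fmt.Printf("fetched %d bytes of go-get metadata\n", len(body))
    }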
func fetchMetadata(ctx context.Context, path, scheme string) (rc io.ReadCloser, err error) { - defer func() { - if err != nil { - err = fmt.Errorf("unable to determine remote metadata protocol: %s", err) - } - }() - if scheme == "http" { rc, err = doFetchMetadata(ctx, "http", path) return @@ -828,35 +839,35 @@ func doFetchMetadata(ctx context.Context, scheme, path string) (io.ReadCloser, e case "https", "http": req, err := http.NewRequest("GET", url, nil) if err != nil { - return nil, fmt.Errorf("failed to access url %q", url) + return nil, errors.Wrapf(err, "unable to build HTTP request for URL %q", url) } resp, err := http.DefaultClient.Do(req.WithContext(ctx)) if err != nil { - return nil, fmt.Errorf("failed to access url %q", url) + return nil, errors.Wrapf(err, "failed HTTP request to URL %q", url) } return resp.Body, nil default: - return nil, fmt.Errorf("unknown remote protocol scheme: %q", scheme) + return nil, errors.Errorf("unknown remote protocol scheme: %q", scheme) } } -// parseMetadata fetches and decodes remote metadata for path. +// getMetadata fetches and decodes remote metadata for path. // // scheme is optional. If it's http, only http will be attempted for fetching. // Any other scheme (including none) will first try https, then fall back to // http. -func parseMetadata(ctx context.Context, path, scheme string) (string, string, string, error) { +func getMetadata(ctx context.Context, path, scheme string) (string, string, string, error) { rc, err := fetchMetadata(ctx, path, scheme) if err != nil { - return "", "", "", err + return "", "", "", errors.Wrapf(err, "unable to fetch raw metadata") } defer rc.Close() imports, err := parseMetaGoImports(rc) if err != nil { - return "", "", "", err + return "", "", "", errors.Wrapf(err, "unable to parse go-import metadata") } match := -1 for i, im := range imports { @@ -864,12 +875,12 @@ func parseMetadata(ctx context.Context, path, scheme string) (string, string, st continue } if match != -1 { - return "", "", "", fmt.Errorf("multiple meta tags match import path %q", path) + return "", "", "", errors.Errorf("multiple meta tags match import path %q", path) } match = i } if match == -1 { - return "", "", "", fmt.Errorf("go-import metadata not found") + return "", "", "", errors.Errorf("go-import metadata not found") } return imports[match].Prefix, imports[match].VCS, imports[match].RepoRoot, nil } diff --git a/internal/gps/deduce_test.go b/internal/gps/deduce_test.go index 30d2fba01e..961fcf7e8f 100644 --- a/internal/gps/deduce_test.go +++ b/internal/gps/deduce_test.go @@ -587,21 +587,16 @@ func TestDeduceFromPath(t *testing.T) { } }) } - for typ, fixtures := range pathDeductionFixtures { - typ, fixtures := typ, fixtures - t.Run("first", func(t *testing.T) { + runSet := func(t *testing.T) { + for typ, fixtures := range pathDeductionFixtures { do(typ, fixtures, t) - }) + } } + t.Run("first", runSet) // Run the test set twice to ensure results are correct for both cached // and uncached deductions. 
- for typ, fixtures := range pathDeductionFixtures { - typ, fixtures := typ, fixtures - t.Run("second", func(t *testing.T) { - do(typ, fixtures, t) - }) - } + t.Run("second", runSet) } func TestVanityDeduction(t *testing.T) { diff --git a/internal/gps/hash.go b/internal/gps/hash.go index a0bb0799cf..61c7b5fcba 100644 --- a/internal/gps/hash.go +++ b/internal/gps/hash.go @@ -11,8 +11,6 @@ import ( "sort" "strconv" "strings" - - "github.com/golang/dep/internal/gps/pkgtree" ) // string headers used to demarcate sections in hash input creation @@ -63,7 +61,7 @@ func (s *solver) writeHashingInputs(w io.Writer) { // getApplicableConstraints will apply overrides, incorporate requireds, // apply local ignores, drop stdlib imports, and finally trim out // ineffectual constraints. - for _, pd := range s.rd.getApplicableConstraints() { + for _, pd := range s.rd.getApplicableConstraints(s.stdLibFn) { writeString(string(pd.Ident.ProjectRoot)) writeString(pd.Ident.Source) writeString(pd.Constraint.typedString()) @@ -71,7 +69,7 @@ func (s *solver) writeHashingInputs(w io.Writer) { // Write out each discrete import, including those derived from requires. writeString(hhImportsReqs) - imports := s.rd.externalImportList() + imports := s.rd.externalImportList(s.stdLibFn) sort.Strings(imports) for _, im := range imports { writeString(im) @@ -133,25 +131,3 @@ func HashingInputsAsString(s Solver) string { return (*bytes.Buffer)(buf).String() } - -type sortPackageOrErr []pkgtree.PackageOrErr - -func (s sortPackageOrErr) Len() int { return len(s) } -func (s sortPackageOrErr) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -func (s sortPackageOrErr) Less(i, j int) bool { - a, b := s[i], s[j] - if a.Err != nil || b.Err != nil { - // Sort errors last. - if b.Err == nil { - return false - } - if a.Err == nil { - return true - } - // And then by string. - return a.Err.Error() < b.Err.Error() - } - // And finally, sort by import path. - return a.P.ImportPath < b.P.ImportPath -} diff --git a/internal/gps/hash_test.go b/internal/gps/hash_test.go index 4f241a4354..220eebae5d 100644 --- a/internal/gps/hash_test.go +++ b/internal/gps/hash_test.go @@ -21,6 +21,8 @@ func TestHashInputs(t *testing.T) { RootPackageTree: fix.rootTree(), Manifest: fix.rootmanifest(), ProjectAnalyzer: naiveAnalyzer{}, + stdLibFn: func(string) bool { return false }, + mkBridgeFn: overrideMkBridge, } s, err := Prepare(params, newdepspecSM(fix.ds, nil)) @@ -72,6 +74,8 @@ func TestHashInputsReqsIgs(t *testing.T) { RootPackageTree: fix.rootTree(), Manifest: rm, ProjectAnalyzer: naiveAnalyzer{}, + stdLibFn: func(string) bool { return false }, + mkBridgeFn: overrideMkBridge, } s, err := Prepare(params, newdepspecSM(fix.ds, nil)) @@ -201,6 +205,8 @@ func TestHashInputsOverrides(t *testing.T) { RootPackageTree: basefix.rootTree(), Manifest: rm, ProjectAnalyzer: naiveAnalyzer{}, + stdLibFn: func(string) bool { return false }, + mkBridgeFn: overrideMkBridge, } table := []struct { diff --git a/internal/gps/internal/fs/fs.go b/internal/gps/internal/fs/fs.go deleted file mode 100644 index c6693c9075..0000000000 --- a/internal/gps/internal/fs/fs.go +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package fs - -import ( - "errors" - "io" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "syscall" -) - -// RenameWithFallback attempts to rename a file or directory, but falls back to -// copying in the event of a cross-link device error. If the fallback copy -// succeeds, src is still removed, emulating normal rename behavior. -func RenameWithFallback(src, dest string) error { - fi, err := os.Stat(src) - if err != nil { - return err - } - - err = os.Rename(src, dest) - if err == nil { - return nil - } - - terr, ok := err.(*os.LinkError) - if !ok { - return err - } - - // Rename may fail if src and dest are on different devices; fall back to - // copy if we detect that case. syscall.EXDEV is the common name for the - // cross device link error which has varying output text across different - // operating systems. - var cerr error - if terr.Err == syscall.EXDEV { - if fi.IsDir() { - cerr = CopyDir(src, dest) - } else { - cerr = CopyFile(src, dest) - } - } else if runtime.GOOS == "windows" { - // In windows it can drop down to an operating system call that - // returns an operating system error with a different number and - // message. Checking for that as a fall back. - noerr, ok := terr.Err.(syscall.Errno) - // 0x11 (ERROR_NOT_SAME_DEVICE) is the windows error. - // See https://msdn.microsoft.com/en-us/library/cc231199.aspx - if ok && noerr == 0x11 { - if fi.IsDir() { - cerr = CopyDir(src, dest) - } else { - cerr = CopyFile(src, dest) - } - } - } else { - return terr - } - - if cerr != nil { - return cerr - } - - return os.RemoveAll(src) -} - -var ( - errSrcNotDir = errors.New("source is not a directory") - errDestExist = errors.New("destination already exists") -) - -// CopyDir recursively copies a directory tree, attempting to preserve permissions. -// Source directory must exist, destination directory must *not* exist. -// Symlinks are ignored and skipped. -func CopyDir(src string, dst string) (err error) { - src = filepath.Clean(src) - dst = filepath.Clean(dst) - - si, err := os.Stat(src) - if err != nil { - return err - } - if !si.IsDir() { - return errSrcNotDir - } - - _, err = os.Stat(dst) - if err != nil && !os.IsNotExist(err) { - return - } - if err == nil { - return errDestExist - } - - err = os.MkdirAll(dst, si.Mode()) - if err != nil { - return - } - - entries, err := ioutil.ReadDir(src) - if err != nil { - return - } - - for _, entry := range entries { - srcPath := filepath.Join(src, entry.Name()) - dstPath := filepath.Join(dst, entry.Name()) - - if entry.IsDir() { - err = CopyDir(srcPath, dstPath) - if err != nil { - return - } - } else { - // This will include symlinks, which is what we want in all cases - // where gps is copying things. - err = CopyFile(srcPath, dstPath) - if err != nil { - return - } - } - } - - return -} - -// CopyFile copies the contents of the file named src to the file named -// by dst. The file will be created if it does not already exist. If the -// destination file exists, all it's contents will be replaced by the contents -// of the source file. The file mode will be copied from the source and -// the copied data is synced/flushed to stable storage. 
-func CopyFile(src, dst string) (err error) { - in, err := os.Open(src) - if err != nil { - return - } - defer in.Close() - - out, err := os.Create(dst) - if err != nil { - return - } - defer func() { - if e := out.Close(); e != nil { - err = e - } - }() - - _, err = io.Copy(out, in) - if err != nil { - return - } - - err = out.Sync() - if err != nil { - return - } - - si, err := os.Stat(src) - if err != nil { - return - } - err = os.Chmod(dst, si.Mode()) - if err != nil { - return - } - - return -} diff --git a/internal/gps/internal/fs/fs_test.go b/internal/gps/internal/fs/fs_test.go deleted file mode 100644 index 3209c58938..0000000000 --- a/internal/gps/internal/fs/fs_test.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package fs - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "testing" -) - -func isDir(name string) (bool, error) { - fi, err := os.Stat(name) - if os.IsNotExist(err) { - return false, nil - } - if err != nil { - return false, err - } - if !fi.IsDir() { - return false, fmt.Errorf("%q is not a directory", name) - } - return true, nil -} - -func TestCopyDir(t *testing.T) { - dir, err := ioutil.TempDir("", "gps") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - - srcdir := filepath.Join(dir, "src") - if err := os.MkdirAll(srcdir, 0755); err != nil { - t.Fatal(err) - } - - srcf, err := os.Create(filepath.Join(srcdir, "myfile")) - if err != nil { - t.Fatal(err) - } - - contents := "hello world" - if _, err := srcf.Write([]byte(contents)); err != nil { - t.Fatal(err) - } - srcf.Close() - - destdir := filepath.Join(dir, "dest") - if err := CopyDir(srcdir, destdir); err != nil { - t.Fatal(err) - } - - dirOK, err := isDir(destdir) - if err != nil { - t.Fatal(err) - } - if !dirOK { - t.Fatalf("expected %s to be a directory", destdir) - } - - destf := filepath.Join(destdir, "myfile") - destcontents, err := ioutil.ReadFile(destf) - if err != nil { - t.Fatal(err) - } - - if contents != string(destcontents) { - t.Fatalf("expected: %s, got: %s", contents, string(destcontents)) - } - - srcinfo, err := os.Stat(srcf.Name()) - if err != nil { - t.Fatal(err) - } - - destinfo, err := os.Stat(destf) - if err != nil { - t.Fatal(err) - } - - if srcinfo.Mode() != destinfo.Mode() { - t.Fatalf("expected %s: %#v\n to be the same mode as %s: %#v", srcf.Name(), srcinfo.Mode(), destf, destinfo.Mode()) - } -} - -func TestCopyFile(t *testing.T) { - dir, err := ioutil.TempDir("", "gps") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - - srcf, err := os.Create(filepath.Join(dir, "srcfile")) - if err != nil { - t.Fatal(err) - } - - contents := "hello world" - if _, err := srcf.Write([]byte(contents)); err != nil { - t.Fatal(err) - } - srcf.Close() - - destf := filepath.Join(dir, "destf") - if err := CopyFile(srcf.Name(), destf); err != nil { - t.Fatal(err) - } - - destcontents, err := ioutil.ReadFile(destf) - if err != nil { - t.Fatal(err) - } - - if contents != string(destcontents) { - t.Fatalf("expected: %s, got: %s", contents, string(destcontents)) - } - - srcinfo, err := os.Stat(srcf.Name()) - if err != nil { - t.Fatal(err) - } - - destinfo, err := os.Stat(destf) - if err != nil { - t.Fatal(err) - } - - if srcinfo.Mode() != destinfo.Mode() { - t.Fatalf("expected %s: %#v\n to be the same mode as %s: %#v", srcf.Name(), srcinfo.Mode(), destf, destinfo.Mode()) - } -} diff --git a/internal/gps/internal/internal.go 
b/internal/gps/internal/internal.go deleted file mode 100644 index c575446c0e..0000000000 --- a/internal/gps/internal/internal.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package internal provides support for gps own packages. -package internal - -import "strings" - -// IsStdLib is a reference to internal implementation. -// It is stored as a var so that tests can swap it out. Ugh globals, ugh. -var IsStdLib = doIsStdLib - -// This was lovingly lifted from src/cmd/go/pkg.go in Go's code -// (isStandardImportPath). -func doIsStdLib(path string) bool { - i := strings.Index(path, "/") - if i < 0 { - i = len(path) - } - - return !strings.Contains(path[:i], ".") -} diff --git a/internal/gps/lock.go b/internal/gps/lock.go index 2e7b787e72..af728dd18f 100644 --- a/internal/gps/lock.go +++ b/internal/gps/lock.go @@ -17,9 +17,6 @@ import ( // solution is all that would be necessary to constitute a lock file, though // tools can include whatever other information they want in their storage. type Lock interface { - // Indicates the version of the solver used to generate this lock data - //SolverVersion() string - // The hash of inputs to gps that resulted in this lock data InputHash() []byte diff --git a/internal/gps/manager_test.go b/internal/gps/manager_test.go index 0f71ca42e1..9bf5a1c415 100644 --- a/internal/gps/manager_test.go +++ b/internal/gps/manager_test.go @@ -16,12 +16,8 @@ import ( "sync/atomic" "testing" "time" - - "github.com/Masterminds/semver" ) -var bd string - // An analyzer that passes nothing back, but doesn't error. This is the naive // case - no constraints, no lock, and no errors. The SourceManager will // interpret this as open/Any constraints on everything in the import graph. @@ -35,15 +31,6 @@ func (a naiveAnalyzer) Info() (name string, version int) { return "naive-analyzer", 1 } -func sv(s string) *semver.Version { - sv, err := semver.NewVersion(s) - if err != nil { - panic(fmt.Sprintf("Error creating semver from %q: %s", s, err)) - } - - return sv -} - func mkNaiveSM(t *testing.T) (*SourceMgr, func()) { cpath, err := ioutil.TempDir("", "smcache") if err != nil { @@ -82,11 +69,6 @@ func remakeNaiveSM(osm *SourceMgr, t *testing.T) (*SourceMgr, func()) { } } -func init() { - _, filename, _, _ := runtime.Caller(1) - bd = path.Dir(filename) -} - func TestSourceManagerInit(t *testing.T) { cpath, err := ioutil.TempDir("", "smcache") if err != nil { diff --git a/internal/gps/manifest.go b/internal/gps/manifest.go index aaed367449..d70a3789fa 100644 --- a/internal/gps/manifest.go +++ b/internal/gps/manifest.go @@ -34,7 +34,7 @@ type RootManifest interface { Manifest // Overrides returns a list of ProjectConstraints that will unconditionally - // supercede any ProjectConstraint declarations made in either the root + // supersede any ProjectConstraint declarations made in either the root // manifest, or in any dependency's manifest. // // Overrides are a special control afforded only to root manifests. 
Tool
diff --git a/internal/gps/maybe_source.go b/internal/gps/maybe_source.go
index b892ae0280..9b2927ae7d 100644
--- a/internal/gps/maybe_source.go
+++ b/internal/gps/maybe_source.go
@@ -84,14 +84,15 @@ func (m maybeGitSource) try(ctx context.Context, cachedir string, c singleSource
 	ustr := m.url.String()
 	path := filepath.Join(cachedir, "sources", sanitizer.Replace(ustr))
 
-	r, err := vcs.NewGitRepo(ustr, path)
+	r, err := newCtxRepo(vcs.Git, ustr, path)
+
 	if err != nil {
 		return nil, 0, unwrapVcsErr(err)
 	}
 
 	src := &gitSource{
 		baseVCSSource: baseVCSSource{
-			repo: &gitRepo{r},
+			repo: r,
 		},
 	}
 
@@ -131,6 +132,8 @@ type maybeGopkginSource struct {
 	url *url.URL
 	// the major version to apply for filtering
 	major uint64
+	// whether or not the source package is "unstable"
+	unstable bool
 }
 
 func (m maybeGopkginSource) try(ctx context.Context, cachedir string, c singleSourceCache, superv *supervisor) (source, sourceState, error) {
@@ -139,8 +142,8 @@ func (m maybeGopkginSource) try(ctx context.Context, cachedir string, c singleSo
 	// So, it's OK to just dumb-join the scheme with the path.
 	path := filepath.Join(cachedir, "sources", sanitizer.Replace(m.url.Scheme+"/"+m.opath))
 	ustr := m.url.String()
+	r, err := newCtxRepo(vcs.Git, ustr, path)
 
-	r, err := vcs.NewGitRepo(ustr, path)
 	if err != nil {
 		return nil, 0, unwrapVcsErr(err)
 	}
@@ -148,10 +151,11 @@ func (m maybeGopkginSource) try(ctx context.Context, cachedir string, c singleSo
 	src := &gopkginSource{
 		gitSource: gitSource{
 			baseVCSSource: baseVCSSource{
-				repo: &gitRepo{r},
+				repo: r,
 			},
 		},
-		major: m.major,
+		major:    m.major,
+		unstable: m.unstable,
 	}
 
 	var vl []PairedVersion
@@ -187,7 +191,8 @@ func (m maybeBzrSource) try(ctx context.Context, cachedir string, c singleSource
 	ustr := m.url.String()
 	path := filepath.Join(cachedir, "sources", sanitizer.Replace(ustr))
 
-	r, err := vcs.NewBzrRepo(ustr, path)
+	r, err := newCtxRepo(vcs.Bzr, ustr, path)
+
 	if err != nil {
 		return nil, 0, unwrapVcsErr(err)
 	}
@@ -209,7 +214,7 @@ func (m maybeBzrSource) try(ctx context.Context, cachedir string, c singleSource
 	src := &bzrSource{
 		baseVCSSource: baseVCSSource{
-			repo: &bzrRepo{r},
+			repo: r,
 		},
 	}
 
@@ -228,7 +233,8 @@ func (m maybeHgSource) try(ctx context.Context, cachedir string, c singleSourceC
 	ustr := m.url.String()
 	path := filepath.Join(cachedir, "sources", sanitizer.Replace(ustr))
 
-	r, err := vcs.NewHgRepo(ustr, path)
+	r, err := newCtxRepo(vcs.Hg, ustr, path)
+
 	if err != nil {
 		return nil, 0, unwrapVcsErr(err)
 	}
@@ -250,7 +256,7 @@ func (m maybeHgSource) try(ctx context.Context, cachedir string, c singleSourceC
 	src := &hgSource{
 		baseVCSSource: baseVCSSource{
-			repo: &hgRepo{r},
+			repo: r,
 		},
 	}
 
diff --git a/internal/gps/paths/paths.go b/internal/gps/paths/paths.go
new file mode 100644
index 0000000000..3c18defe9c
--- /dev/null
+++ b/internal/gps/paths/paths.go
@@ -0,0 +1,21 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package paths
+
+import "strings"
+
+// IsStandardImportPath reports whether $GOROOT/src/path should be considered
+// part of the standard distribution. For historical reasons we allow people to add
+// their own code to $GOROOT instead of using $GOPATH, but we assume that
+// code will start with a domain name (dot in the first element).
+// This was lovingly taken from src/cmd/go/pkg.go in Go's code (isStandardImportPath).
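+//
+// For example, per the test fixtures below: "net/http" and "appengine" have
+// no dot in their first path element and are treated as standard, while
+// "github.com/golang/dep" is not.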
+func IsStandardImportPath(path string) bool { + i := strings.Index(path, "/") + if i < 0 { + i = len(path) + } + + return !strings.Contains(path[:i], ".") +} diff --git a/internal/gps/internal/internal_test.go b/internal/gps/paths/paths_test.go similarity index 79% rename from internal/gps/internal/internal_test.go rename to internal/gps/paths/paths_test.go index 6e94086534..aaaafc1cf1 100644 --- a/internal/gps/internal/internal_test.go +++ b/internal/gps/paths/paths_test.go @@ -2,11 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package internal +package paths import "testing" -func TestIsStdLib(t *testing.T) { +func TestIsStandardImportPath(t *testing.T) { fix := []struct { ip string is bool @@ -14,11 +14,13 @@ func TestIsStdLib(t *testing.T) { {"appengine", true}, {"net/http", true}, {"github.com/anything", false}, + {"github.com", false}, {"foo", true}, + {".", false}, } for _, f := range fix { - r := doIsStdLib(f.ip) + r := IsStandardImportPath(f.ip) if r != f.is { if r { t.Errorf("%s was marked stdlib but should not have been", f.ip) diff --git a/internal/gps/pkgtree/pkgtree.go b/internal/gps/pkgtree/pkgtree.go index a6b452764d..f14598c286 100644 --- a/internal/gps/pkgtree/pkgtree.go +++ b/internal/gps/pkgtree/pkgtree.go @@ -28,6 +28,15 @@ type Package struct { TestImports []string // Imports from all go test files (in go/build parlance: both TestImports and XTestImports) } +// vcsRoots is a set of directories we should not descend into in ListPackages when +// searching for Go packages +var vcsRoots = map[string]struct{}{ + ".git": struct{}{}, + ".bzr": struct{}{}, + ".svn": struct{}{}, + ".hg": struct{}{}, +} + // ListPackages reports Go package information about all directories in the tree // at or below the provided fileRoot. // @@ -78,10 +87,13 @@ func ListPackages(fileRoot, importRoot string) (PackageTree, error) { case "vendor", "Godeps": return filepath.SkipDir } - // We do skip dot-dirs, though, because it's such a ubiquitous standard - // that they not be visited by normal commands, and because things get - // really weird if we don't. - if strings.HasPrefix(fi.Name(), ".") { + + // Skip dirs that are known to be VCS roots. + // + // Note that there are some pathological edge cases this doesn't cover, + // such as a user using Git for version control, but having a package + // named "svn" in a directory named ".svn". + if _, ok := vcsRoots[fi.Name()]; ok { return filepath.SkipDir } @@ -614,7 +626,15 @@ func wmToReach(workmap map[string]wm, backprop bool) (ReachMap, map[string]*Prob // that was already completed (black), we don't have to defend against // an empty path here. - fromErr := errmap[from] + fromErr, exists := errmap[from] + // FIXME: It should not be possible for fromErr to not exist, + // See issue https://github.com/golang/dep/issues/351 + // This is a temporary solution to avoid a panic. 
+ if !exists { + fromErr = &ProblemImportError{ + Err: fmt.Errorf("unknown error for %q, if you get this error see https://github.com/golang/dep/issues/351", from), + } + } err := &ProblemImportError{ Err: fromErr.Err, Cause: make([]string, 0, len(path)+len(fromErr.Cause)+1), diff --git a/internal/gps/pkgtree/pkgtree_test.go b/internal/gps/pkgtree/pkgtree_test.go index 092782c0d8..ab134f02e7 100644 --- a/internal/gps/pkgtree/pkgtree_test.go +++ b/internal/gps/pkgtree/pkgtree_test.go @@ -17,25 +17,10 @@ import ( "strings" "testing" - "github.com/golang/dep/internal/gps/internal" - "github.com/golang/dep/internal/gps/internal/fs" + "github.com/golang/dep/internal/fs" + "github.com/golang/dep/internal/gps/paths" ) -// Stores a reference to original IsStdLib, so we could restore overridden version. -var doIsStdLib = internal.IsStdLib - -func init() { - overrideIsStdLib() -} - -// sets the IsStdLib func to always return false, otherwise it would identify -// pretty much all of our fixtures as being stdlib and skip everything. -func overrideIsStdLib() { - internal.IsStdLib = func(path string) bool { - return false - } -} - // PackageTree.ToReachMap() uses an easily separable algorithm, wmToReach(), // to turn a discovered set of packages and their imports into a proper pair of // internal and external reach maps. @@ -1073,20 +1058,6 @@ func TestListPackages(t *testing.T) { }, }, }, - // disallow/.m1p is ignored by listPackages...for now. Kept - // here commented because this might change again... - //"disallow/.m1p": { - //P: Package{ - //ImportPath: "disallow/.m1p", - //CommentPath: "", - //Name: "m1p", - //Imports: []string{ - //"github.com/golang/dep/internal/gps", - //"os", - //"sort", - //}, - //}, - //}, "disallow/testdata": { P: Package{ ImportPath: "disallow/testdata", @@ -1280,7 +1251,7 @@ func TestListPackages(t *testing.T) { }, }, }, - "skip directories starting with '.'": { + "does not skip directories starting with '.'": { fileRoot: j("dotgodir"), importRoot: "dotgodir", out: PackageTree{ @@ -1292,6 +1263,13 @@ func TestListPackages(t *testing.T) { Imports: []string{}, }, }, + "dotgodir/.go": { + P: Package{ + ImportPath: "dotgodir/.go", + Name: "dot", + Imports: []string{}, + }, + }, "dotgodir/foo.go": { P: Package{ ImportPath: "dotgodir/foo.go", @@ -1299,6 +1277,18 @@ func TestListPackages(t *testing.T) { Imports: []string{"sort"}, }, }, + "dotgodir/.m1p": { + P: Package{ + ImportPath: "dotgodir/.m1p", + CommentPath: "", + Name: "m1p", + Imports: []string{ + "github.com/golang/dep/internal/gps", + "os", + "sort", + }, + }, + }, }, }, }, @@ -1725,22 +1715,6 @@ func TestFlattenReachMap(t *testing.T) { t.Fatalf("listPackages failed on varied test case: %s", err) } - var expect []string - var name string - var ignore map[string]bool - var stdlib, main, tests bool - - validate := func() { - rm, em := vptree.ToReachMap(main, tests, true, ignore) - if len(em) != 0 { - t.Errorf("Should not have any error pkgs from ToReachMap, got %s", em) - } - result := rm.Flatten(stdlib) - if !reflect.DeepEqual(expect, result) { - t.Errorf("Wrong imports in %q case:\n\t(GOT): %s\n\t(WNT): %s", name, result, expect) - } - } - all := []string{ "encoding/binary", "github.com/Masterminds/semver", @@ -1752,11 +1726,11 @@ func TestFlattenReachMap(t *testing.T) { "sort", } - // helper to rewrite expect, except for a couple packages + // helper to generate testCase.expect as: all, except for a couple packages // // this makes it easier to see what we're taking out on each test - except := func(not ...string) { 
- expect = make([]string, len(all)-len(not)) + except := func(not ...string) []string { + expect := make([]string, len(all)-len(not)) drop := make(map[string]bool) for _, npath := range not { @@ -1770,113 +1744,159 @@ func TestFlattenReachMap(t *testing.T) { k++ } } + return expect } - // everything on - name = "simple" - except() - stdlib, main, tests = true, true, true - validate() - - // turning off stdlib should cut most things, but we need to override the - // function - internal.IsStdLib = doIsStdLib - name = "no stdlib" - stdlib = false - except("encoding/binary", "go/parser", "hash", "net/http", "os", "sort") - validate() - // restore stdlib func override - overrideIsStdLib() - - // stdlib back in; now exclude tests, which should just cut one - name = "no tests" - stdlib, tests = true, false - except("encoding/binary") - validate() - - // Now skip main, which still just cuts out one - name = "no main" - main, tests = false, true - except("net/http") - validate() - - // No test and no main, which should be additive - name = "no test, no main" - main, tests = false, false - except("net/http", "encoding/binary") - validate() - - // now, the ignore tests. turn main and tests back on - main, tests = true, true - - // start with non-matching - name = "non-matching ignore" - ignore = map[string]bool{ - "nomatch": true, - } - except() - validate() - - // should have the same effect as ignoring main - name = "ignore the root" - ignore = map[string]bool{ - "github.com/example/varied": true, - } - except("net/http") - validate() - - // now drop a more interesting one - name = "ignore simple" - ignore = map[string]bool{ - "github.com/example/varied/simple": true, - } - // we get github.com/golang/dep/internal/gps from m1p, too, so it should still be there - except("go/parser") - validate() - - // now drop two - name = "ignore simple and namemismatch" - ignore = map[string]bool{ - "github.com/example/varied/simple": true, - "github.com/example/varied/namemismatch": true, - } - except("go/parser", "github.com/Masterminds/semver") - validate() - - // make sure tests and main play nice with ignore - name = "ignore simple and namemismatch, and no tests" - tests = false - except("go/parser", "github.com/Masterminds/semver", "encoding/binary") - validate() - name = "ignore simple and namemismatch, and no main" - main, tests = false, true - except("go/parser", "github.com/Masterminds/semver", "net/http") - validate() - name = "ignore simple and namemismatch, and no main or tests" - main, tests = false, false - except("go/parser", "github.com/Masterminds/semver", "net/http", "encoding/binary") - validate() - - main, tests = true, true - - // ignore two that should knock out gps - name = "ignore both importers" - ignore = map[string]bool{ - "github.com/example/varied/simple": true, - "github.com/example/varied/m1p": true, - } - except("sort", "github.com/golang/dep/internal/gps", "go/parser") - validate() - - // finally, directly ignore some external packages - name = "ignore external" - ignore = map[string]bool{ - "github.com/golang/dep/internal/gps": true, - "go/parser": true, - "sort": true, + for _, testCase := range []*flattenReachMapCase{ + // everything on + { + name: "simple", + expect: except(), + isStdLibFn: nil, + main: true, + tests: true, + }, + // no stdlib + { + name: "no stdlib", + expect: except("encoding/binary", "go/parser", "hash", "net/http", "os", "sort"), + isStdLibFn: paths.IsStandardImportPath, + main: true, + tests: true, + }, + // stdlib back in; now exclude tests, which 
should just cut one
+		{
+			name:       "no tests",
+			expect:     except("encoding/binary"),
+			isStdLibFn: nil,
+			main:       true,
+			tests:      false,
+		},
+		// Now skip main, which still just cuts out one
+		{
+			name:       "no main",
+			expect:     except("net/http"),
+			isStdLibFn: nil,
+			main:       false,
+			tests:      true,
+		},
+		// No test and no main, which should be additive
+		{
+			name:       "no tests, no main",
+			expect:     except("net/http", "encoding/binary"),
+			isStdLibFn: nil,
+			main:       false,
+			tests:      false,
+		},
+		// now, the ignore tests. turn main and tests back on
+		// start with non-matching
+		{
+			name:       "non-matching ignore",
+			expect:     except(),
+			isStdLibFn: nil,
+			main:       true,
+			tests:      true,
+			ignore: map[string]bool{
+				"nomatch": true,
+			},
+		},
+		// should have the same effect as ignoring main
+		{
+			name:       "ignore the root",
+			expect:     except("net/http"),
+			isStdLibFn: nil,
+			main:       true,
+			tests:      true,
+			ignore: map[string]bool{
+				"github.com/example/varied": true,
+			},
+		},
+		// now drop a more interesting one
+		// we get github.com/golang/dep/internal/gps from m1p, too, so it should still be there
+		{
+			name:       "ignore simple",
+			expect:     except("go/parser"),
+			isStdLibFn: nil,
+			main:       true,
+			tests:      true,
+			ignore: map[string]bool{
+				"github.com/example/varied/simple": true,
+			},
+		},
+		// now drop two
+		{
+			name:       "ignore simple and namemismatch",
+			expect:     except("go/parser", "github.com/Masterminds/semver"),
+			isStdLibFn: nil,
+			main:       true,
+			tests:      true,
+			ignore: map[string]bool{
+				"github.com/example/varied/simple":       true,
+				"github.com/example/varied/namemismatch": true,
+			},
+		},
+		// make sure tests and main play nice with ignore
+		{
+			name:       "ignore simple and namemismatch, and no tests",
+			expect:     except("go/parser", "github.com/Masterminds/semver", "encoding/binary"),
+			isStdLibFn: nil,
+			main:       true,
+			tests:      false,
+			ignore: map[string]bool{
+				"github.com/example/varied/simple":       true,
+				"github.com/example/varied/namemismatch": true,
+			},
+		},
+		{
+			name:       "ignore simple and namemismatch, and no main",
+			expect:     except("go/parser", "github.com/Masterminds/semver", "net/http"),
+			isStdLibFn: nil,
+			main:       false,
+			tests:      true,
+			ignore: map[string]bool{
+				"github.com/example/varied/simple":       true,
+				"github.com/example/varied/namemismatch": true,
+			},
+		},
+		{
+			name:       "ignore simple and namemismatch, and no main or tests",
+			expect:     except("go/parser", "github.com/Masterminds/semver", "net/http", "encoding/binary"),
+			isStdLibFn: nil,
+			main:       false,
+			tests:      false,
+			ignore: map[string]bool{
+				"github.com/example/varied/simple":       true,
+				"github.com/example/varied/namemismatch": true,
+			},
+		},
+		// ignore two that should knock out gps
+		{
+			name:       "ignore both importers",
+			expect:     except("sort", "github.com/golang/dep/internal/gps", "go/parser"),
+			isStdLibFn: nil,
+			main:       true,
+			tests:      true,
+			ignore: map[string]bool{
+				"github.com/example/varied/simple": true,
+				"github.com/example/varied/m1p":    true,
+			},
+		},
+		// finally, directly ignore some external packages
+		{
+			name:       "ignore external",
+			expect:     except("sort", "github.com/golang/dep/internal/gps", "go/parser"),
+			isStdLibFn: nil,
+			main:       true,
+			tests:      true,
+			ignore: map[string]bool{
+				"github.com/golang/dep/internal/gps": true,
+				"go/parser":                          true,
+				"sort":                               true,
+			},
+		},
+	} {
+		t.Run(testCase.name, testFlattenReachMap(&vptree, testCase))
 	}
-	except("sort", "github.com/golang/dep/internal/gps", "go/parser")
-	validate()

 	// The only thing varied *doesn't* cover is disallowed path patterns
 	ptree, err := ListPackages(filepath.Join(getTestdataRootDir(t),
"src", "disallow"), "disallow") @@ -1884,14 +1904,34 @@ func TestFlattenReachMap(t *testing.T) { t.Fatalf("ListPackages failed on disallow test case: %s", err) } - rm, em := ptree.ToReachMap(false, false, true, nil) - if len(em) != 0 { - t.Errorf("Should not have any error packages from ToReachMap, got %s", em) - } - result := rm.Flatten(true) - expect = []string{"github.com/golang/dep/internal/gps", "hash", "sort"} - if !reflect.DeepEqual(expect, result) { - t.Errorf("Wrong imports in %q case:\n\t(GOT): %s\n\t(WNT): %s", name, result, expect) + t.Run("disallowed", testFlattenReachMap(&ptree, &flattenReachMapCase{ + name: "disallowed", + expect: []string{"github.com/golang/dep/internal/gps", "hash", "sort"}, + isStdLibFn: nil, + main: false, + tests: false, + })) +} + +type flattenReachMapCase struct { + expect []string + name string + ignore map[string]bool + main, tests bool + isStdLibFn func(string) bool +} + +func testFlattenReachMap(ptree *PackageTree, testCase *flattenReachMapCase) func(*testing.T) { + return func(t *testing.T) { + t.Parallel() + rm, em := ptree.ToReachMap(testCase.main, testCase.tests, true, testCase.ignore) + if len(em) != 0 { + t.Errorf("Should not have any error pkgs from ToReachMap, got %s", em) + } + result := rm.FlattenFn(testCase.isStdLibFn) + if !reflect.DeepEqual(testCase.expect, result) { + t.Errorf("Wrong imports in %q case:\n\t(GOT): %s\n\t(WNT): %s", testCase.name, result, testCase.expect) + } } } diff --git a/internal/gps/pkgtree/reachmap.go b/internal/gps/pkgtree/reachmap.go index ce34eac67d..27af5e90ba 100644 --- a/internal/gps/pkgtree/reachmap.go +++ b/internal/gps/pkgtree/reachmap.go @@ -7,8 +7,6 @@ package pkgtree import ( "sort" "strings" - - "github.com/golang/dep/internal/gps/internal" ) // ReachMap maps a set of import paths (keys) to the sets of transitively @@ -20,44 +18,32 @@ type ReachMap map[string]struct { Internal, External []string } -// FlattenAll flattens a reachmap into a sorted, deduplicated list of all the -// external imports named by its contained packages. -// -// If stdlib is false, then stdlib imports are excluded from the result. -func (rm ReachMap) FlattenAll(stdlib bool) []string { - return rm.flatten(func(pkg string) bool { return true }, stdlib) +// Eliminate import paths with any elements having leading dots, leading +// underscores, or testdata. If these are internally reachable (which is +// a no-no, but possible), any external imports will have already been +// pulled up through ExternalReach. The key here is that we don't want +// to treat such packages as themselves being sources. +func pkgFilter(pkg string) bool { + for _, elem := range strings.Split(pkg, "/") { + if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" { + return false + } + } + return true } -// Flatten flattens a reachmap into a sorted, deduplicated list of all the +// FlattenFn flattens a reachmap into a sorted, deduplicated list of all the // external imports named by its contained packages, but excludes imports coming // from packages with disallowed patterns in their names: any path element with // a leading dot, a leading underscore, with the name "testdata". // -// If stdlib is false, then stdlib imports are excluded from the result. -func (rm ReachMap) Flatten(stdlib bool) []string { - f := func(pkg string) bool { - // Eliminate import paths with any elements having leading dots, leading - // underscores, or testdata. 
If these are internally reachable (which is - // a no-no, but possible), any external imports will have already been - // pulled up through ExternalReach. The key here is that we don't want - // to treat such packages as themselves being sources. - for _, elem := range strings.Split(pkg, "/") { - if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" { - return false - } - } - return true - } - - return rm.flatten(f, stdlib) -} - -func (rm ReachMap) flatten(filter func(string) bool, stdlib bool) []string { +// Imports for which exclude returns true will be left out. +func (rm ReachMap) FlattenFn(exclude func(string) bool) []string { exm := make(map[string]struct{}) for pkg, ie := range rm { - if filter(pkg) { + if pkgFilter(pkg) { for _, ex := range ie.External { - if !stdlib && internal.IsStdLib(ex) { + if exclude != nil && exclude(ex) { continue } exm[ex] = struct{}{} diff --git a/internal/gps/result.go b/internal/gps/result.go index 3c79ffeac8..03948ff5bd 100644 --- a/internal/gps/result.go +++ b/internal/gps/result.go @@ -14,6 +14,14 @@ import ( // additional methods that report information about the solve run. type Solution interface { Lock + // The name of the ProjectAnalyzer used in generating this solution. + AnalyzerName() string + // The version of the ProjectAnalyzer used in generating this solution. + AnalyzerVersion() int + // The name of the Solver used in generating this solution. + SolverName() string + // The version of the Solver used in generating this solution. + SolverVersion() int Attempts() int } @@ -26,6 +34,15 @@ type solution struct { // The hash digest of the input opts hd []byte + + // The analyzer name + analyzerName string + + // The analyzer version + analyzerVersion int + + // The solver used in producing this solution + solv Solver } // WriteDepTree takes a basedir and a Lock, and exports all the projects @@ -76,3 +93,19 @@ func (r solution) Attempts() int { func (r solution) InputHash() []byte { return r.hd } + +func (r solution) AnalyzerName() string { + return r.analyzerName +} + +func (r solution) AnalyzerVersion() int { + return r.analyzerVersion +} + +func (r solution) SolverName() string { + return r.solv.Name() +} + +func (r solution) SolverVersion() int { + return r.solv.Version() +} diff --git a/internal/gps/result_test.go b/internal/gps/result_test.go index ad2c235c94..da73b47b34 100644 --- a/internal/gps/result_test.go +++ b/internal/gps/result_test.go @@ -13,7 +13,6 @@ import ( ) var basicResult solution -var kub atom func pi(n string) ProjectIdentifier { return ProjectIdentifier{ @@ -35,12 +34,16 @@ func init() { }, nil), }, } - - // just in case something needs punishing, kubernetes is happy to oblige - kub = atom{ - id: pi("github.com/kubernetes/kubernetes"), - v: NewVersion("1.0.0").Is(Revision("528f879e7d3790ea4287687ef0ab3f2a01cc2718")), - } + basicResult.analyzerName, basicResult.analyzerVersion = (naiveAnalyzer{}).Info() + + // Just in case something needs punishing, kubernetes offers a complex, + // real-world set of dependencies, and this revision is known to work. 
+ /* + _ = atom{ + id: pi("github.com/kubernetes/kubernetes"), + v: NewVersion("1.0.0").Is(Revision("528f879e7d3790ea4287687ef0ab3f2a01cc2718")), + } + */ } func testWriteDepTree(t *testing.T) { diff --git a/internal/gps/rootdata.go b/internal/gps/rootdata.go index eb7e10ae08..e912ca292c 100644 --- a/internal/gps/rootdata.go +++ b/internal/gps/rootdata.go @@ -8,7 +8,6 @@ import ( "sort" "github.com/armon/go-radix" - "github.com/golang/dep/internal/gps/internal" "github.com/golang/dep/internal/gps/pkgtree" ) @@ -54,15 +53,9 @@ type rootdata struct { // externalImportList returns a list of the unique imports from the root data. // Ignores and requires are taken into consideration, stdlib is excluded, and // errors within the local set of package are not backpropagated. -func (rd rootdata) externalImportList() []string { +func (rd rootdata) externalImportList(stdLibFn func(string) bool) []string { rm, _ := rd.rpt.ToReachMap(true, true, false, rd.ig) - all := rm.Flatten(false) - reach := make([]string, 0, len(all)) - for _, r := range all { - if !internal.IsStdLib(r) { - reach = append(reach, r) - } - } + reach := rm.FlattenFn(stdLibFn) // If there are any requires, slide them into the reach list, as well. if len(rd.req) > 0 { @@ -86,7 +79,7 @@ func (rd rootdata) externalImportList() []string { return reach } -func (rd rootdata) getApplicableConstraints() []workingConstraint { +func (rd rootdata) getApplicableConstraints(stdLibFn func(string) bool) []workingConstraint { // Merge the normal and test constraints together pc := rd.rm.DependencyConstraints().merge(rd.rm.TestDependencyConstraints()) @@ -120,8 +113,8 @@ func (rd rootdata) getApplicableConstraints() []workingConstraint { // Walk all dep import paths we have to consider and mark the corresponding // wc entry in the trie, if any - for _, im := range rd.externalImportList() { - if internal.IsStdLib(im) { + for _, im := range rd.externalImportList(stdLibFn) { + if stdLibFn(im) { continue } diff --git a/internal/gps/rootdata_test.go b/internal/gps/rootdata_test.go index 4b80284682..484c2cfcb9 100644 --- a/internal/gps/rootdata_test.go +++ b/internal/gps/rootdata_test.go @@ -17,6 +17,8 @@ func TestRootdataExternalImports(t *testing.T) { RootPackageTree: fix.rootTree(), Manifest: fix.rootmanifest(), ProjectAnalyzer: naiveAnalyzer{}, + stdLibFn: func(string) bool { return false }, + mkBridgeFn: overrideMkBridge, } is, err := Prepare(params, newdepspecSM(fix.ds, nil)) @@ -26,7 +28,7 @@ func TestRootdataExternalImports(t *testing.T) { rd := is.(*solver).rd want := []string{"a", "b"} - got := rd.externalImportList() + got := rd.externalImportList(params.stdLibFn) if !reflect.DeepEqual(want, got) { t.Errorf("Unexpected return from rootdata.externalImportList:\n\t(GOT): %s\n\t(WNT): %s", got, want) } @@ -35,7 +37,7 @@ func TestRootdataExternalImports(t *testing.T) { rd.req["c"] = true want = []string{"a", "b", "c"} - got = rd.externalImportList() + got = rd.externalImportList(params.stdLibFn) if !reflect.DeepEqual(want, got) { t.Errorf("Unexpected return from rootdata.externalImportList:\n\t(GOT): %s\n\t(WNT): %s", got, want) } @@ -46,7 +48,7 @@ func TestRootdataExternalImports(t *testing.T) { rd.rpt.Packages["root"] = poe // should still be the same - got = rd.externalImportList() + got = rd.externalImportList(params.stdLibFn) if !reflect.DeepEqual(want, got) { t.Errorf("Unexpected return from rootdata.externalImportList:\n\t(GOT): %s\n\t(WNT): %s", got, want) } @@ -56,7 +58,7 @@ func TestRootdataExternalImports(t *testing.T) { rd.ig["b"] = true 
want = []string{"a", "c"} - got = rd.externalImportList() + got = rd.externalImportList(params.stdLibFn) if !reflect.DeepEqual(want, got) { t.Errorf("Unexpected return from rootdata.externalImportList:\n\t(GOT): %s\n\t(WNT): %s", got, want) } @@ -70,6 +72,8 @@ func TestGetApplicableConstraints(t *testing.T) { RootPackageTree: fix.rootTree(), Manifest: fix.rootmanifest(), ProjectAnalyzer: naiveAnalyzer{}, + stdLibFn: func(string) bool { return false }, + mkBridgeFn: overrideMkBridge, } is, err := Prepare(params, newdepspecSM(fix.ds, nil)) @@ -211,7 +215,7 @@ func TestGetApplicableConstraints(t *testing.T) { t.Run(fix.name, func(t *testing.T) { fix.mut() - got := rd.getApplicableConstraints() + got := rd.getApplicableConstraints(params.stdLibFn) if !reflect.DeepEqual(fix.result, got) { t.Errorf("unexpected applicable constraint set:\n\t(GOT): %+v\n\t(WNT): %+v", got, fix.result) } diff --git a/internal/gps/solve_bimodal_test.go b/internal/gps/solve_bimodal_test.go index f915189a2b..1dcff60b5f 100644 --- a/internal/gps/solve_bimodal_test.go +++ b/internal/gps/solve_bimodal_test.go @@ -589,7 +589,7 @@ var bimodalFixtures = map[string]bimodalFixture{ // Preferred version, as derived from a dep's lock, is attempted first, even // if the root also has a direct dep on it (root doesn't need to use // preferreds, because it has direct control AND because the root lock - // already supercedes dep lock "preferences") + // already supersedes dep lock "preferences") "respect dep prefv with root import": { ds: []depspec{ dsp(mkDepspec("root 0.0.0"), diff --git a/internal/gps/solve_test.go b/internal/gps/solve_test.go index de561672b6..29e4b619fa 100644 --- a/internal/gps/solve_test.go +++ b/internal/gps/solve_test.go @@ -18,7 +18,6 @@ import ( "testing" "unicode" - "github.com/golang/dep/internal/gps/internal" "github.com/golang/dep/internal/gps/pkgtree" ) @@ -27,33 +26,12 @@ var fixtorun string // TODO(sdboyer) regression test ensuring that locks with only revs for projects don't cause errors func init() { flag.StringVar(&fixtorun, "gps.fix", "", "A single fixture to run in TestBasicSolves or TestBimodalSolves") - mkBridge(nil, nil, false) - overrideMkBridge() - overrideIsStdLib() } -// sets the mkBridge global func to one that allows virtualized RootDirs -func overrideMkBridge() { - // For all tests, override the base bridge with the depspecBridge that skips - // verifyRootDir calls - mkBridge = func(s *solver, sm SourceManager, down bool) sourceBridge { - return &depspecBridge{ - &bridge{ - sm: sm, - s: s, - down: down, - vlists: make(map[ProjectIdentifier][]Version), - }, - } - } -} - -// sets the isStdLib func to always return false, otherwise it would identify -// pretty much all of our fixtures as being stdlib and skip everything -func overrideIsStdLib() { - internal.IsStdLib = func(path string) bool { - return false - } +// overrideMkBridge overrides the base bridge with the depspecBridge that skips +// verifyRootDir calls +func overrideMkBridge(s *solver, sm SourceManager, down bool) sourceBridge { + return &depspecBridge{mkBridge(s, sm, down)} } type testlogger struct { @@ -80,7 +58,10 @@ func fixSolve(params SolveParameters, sm SourceManager, t *testing.T) (Solution, // system will decide whether or not to actually show the output (based on // -v, or selectively on test failure). 
params.TraceLogger = log.New(testlogger{T: t}, "", 0) - + // always return false, otherwise it would identify pretty much all of + // our fixtures as being stdlib and skip everything + params.stdLibFn = func(string) bool { return false } + params.mkBridgeFn = overrideMkBridge s, err := Prepare(params, sm) if err != nil { return nil, err @@ -204,7 +185,7 @@ func fixtureSolveSimpleChecks(fix specfix, soln Solution, err error, t *testing. if err != nil { if fixfail == nil { t.Errorf("Solve failed unexpectedly:\n%s", err) - } else if !reflect.DeepEqual(fixfail, err) { + } else if !(fixfail.Error() == err.Error()) { // TODO(sdboyer) reflect.DeepEqual works for now, but once we start // modeling more complex cases, this should probably become more robust t.Errorf("Failure mismatch:\n\t(GOT): %s\n\t(WNT): %s", err, fixfail) @@ -320,7 +301,9 @@ func TestBadSolveOpts(t *testing.T) { fix.ds[0].n = ProjectRoot(pn) sm := newdepspecSM(fix.ds, nil) - params := SolveParameters{} + params := SolveParameters{ + mkBridgeFn: overrideMkBridge, + } _, err := Prepare(params, nil) if err == nil { @@ -438,14 +421,7 @@ func TestBadSolveOpts(t *testing.T) { // swap out the test mkBridge override temporarily, just to make sure we get // the right error - mkBridge = func(s *solver, sm SourceManager, down bool) sourceBridge { - return &bridge{ - sm: sm, - s: s, - down: down, - vlists: make(map[ProjectIdentifier][]Version), - } - } + params.mkBridgeFn = nil _, err = Prepare(params, sm) if err == nil { @@ -462,7 +438,4 @@ func TestBadSolveOpts(t *testing.T) { } else if !strings.Contains(err.Error(), "is a file, not a directory") { t.Error("Prepare should have given error on file as RootDir, but gave:", err) } - - // swap them back...not sure if this matters, but just in case - overrideMkBridge() } diff --git a/internal/gps/solver.go b/internal/gps/solver.go index c2b8203ac7..06e6e9afa6 100644 --- a/internal/gps/solver.go +++ b/internal/gps/solver.go @@ -12,27 +12,10 @@ import ( "strings" "github.com/armon/go-radix" - "github.com/golang/dep/internal/gps/internal" + "github.com/golang/dep/internal/gps/paths" "github.com/golang/dep/internal/gps/pkgtree" ) -var ( - osList []string - archList []string - ignoreTags = []string{} //[]string{"appengine", "ignore"} //TODO: appengine is a special case for now: https://github.com/tools/godep/issues/353 -) - -func init() { - // The supported systems are listed in - // https://github.com/golang/go/blob/master/src/go/build/syslist.go - // The lists are not exported, so we need to duplicate them here. - osListString := "android darwin dragonfly freebsd linux nacl netbsd openbsd plan9 solaris windows" - osList = strings.Split(osListString, " ") - - archListString := "386 amd64 amd64p32 arm armbe arm64 arm64be ppc64 ppc64le mips mipsle mips64 mips64le mips64p32 mips64p32le ppc s390 s390x sparc sparc64" - archList = strings.Split(archListString, " ") -} - var rootRev = Revision("") // SolveParameters hold all arguments to a solver run. @@ -109,6 +92,15 @@ type SolveParameters struct { // solver will generate informative trace output as it moves through the // solving process. TraceLogger *log.Logger + + // stdLibFn is the function to use to recognize standard library import paths. + // Only overridden for tests. Defaults to paths.IsStandardImportPath if nil. + stdLibFn func(string) bool + + // mkBridgeFn is the function to use to create sourceBridges. + // Only overridden for tests (so we can run with virtual RootDir). + // Defaults to mkBridge if nil. 
+	mkBridgeFn func(*solver, SourceManager, bool) sourceBridge
 }

 // solver is a CDCL-style constraint solver with satisfiability conditions
@@ -122,6 +114,9 @@ type solver struct {
 	// Logger used exclusively for trace output, or nil to suppress.
 	tl *log.Logger

+	// The function to use to recognize standard library import paths.
+	stdLibFn func(string) bool
+
 	// A bridge to the standard SourceManager. The adapter does some local
 	// caching of pre-sorted version lists, as well as translation between the
 	// full-on ProjectIdentifiers that the solver deals with and the simplified
@@ -284,15 +279,23 @@ func Prepare(params SolveParameters, sm SourceManager) (Solver, error) {
 		return nil, err
 	}

+	if params.stdLibFn == nil {
+		params.stdLibFn = paths.IsStandardImportPath
+	}
+
 	s := &solver{
-		tl: params.TraceLogger,
-		rd: rd,
+		tl:       params.TraceLogger,
+		stdLibFn: params.stdLibFn,
+		rd:       rd,
 	}

 	// Set up the bridge and ensure the root dir is in good, working order
-	// before doing anything else. (This call is stubbed out in tests, via
-	// overriding mkBridge(), so we can run with virtual RootDir.)
-	s.b = mkBridge(s, sm, params.Downgrade)
+	// before doing anything else.
+	if params.mkBridgeFn == nil {
+		s.b = mkBridge(s, sm, params.Downgrade)
+	} else {
+		s.b = params.mkBridgeFn(s, sm, params.Downgrade)
+	}
 	err = s.b.verifyRootDir(params.RootDir)
 	if err != nil {
 		return nil, err
@@ -333,6 +336,49 @@ type Solver interface {
 	// Solve initiates a solving run. It will either complete successfully with
 	// a Solution, or fail with an informative error.
 	Solve() (Solution, error)
+
+	// Name returns a string identifying the particular solver backend.
+	//
+	// Different solvers likely have different invariants, and likely will not
+	// have identical possible result sets for any particular inputs; in some
+	// cases, they may even be disjoint.
+	Name() string
+
+	// Version returns an int indicating the version of the solver of the given
+	// Name(). Implementations should change their reported version ONLY when
+	// the logic is changed in such a way that substantially changes the result
+	// set that is possible for a substantial subset of likely inputs.
+	//
+	// "Substantial" is an imprecise term, and it is used intentionally. There
+	// are no easy, general ways of subdividing constraint solving problems such
+	// that one can know, a priori, the full impact that subtle algorithmic
+	// changes will have on possible result sets. Consequently, we have to fall
+	// back on coarser, intuition-based reasoning as to whether a change is
+	// large enough that it is likely to be broadly user-visible.
+	//
+	// This is acceptable, because this value is not used programmatically by
+	// the solver in any way. Rather, it is intended for implementing tools to
+	// use as a coarse signal to users about compatibility between their tool's
+	// version and the current data, typically via persistence to a Lock.
+	// Changes to the version number reported should be weighed between
+	// confusing teams by having two members' tools continuously rolling back
+	// each others' chosen Solutions for no apparent reason, and annoying teams
+	// by changing the number for changes so remote that warnings about solver
+	// version mismatches become meaningless.
+	//
+	// Err on the side of caution.
+	//
+	// Chronology is the only implication of the ordering - that lower version
+	// numbers were published before higher numbers.
+ Version() int +} + +func (s *solver) Name() string { + return "gps-cdcl" +} + +func (s *solver) Version() int { + return 1 } // Solve attempts to find a dependency solution for the given project, as @@ -356,9 +402,10 @@ func (s *solver) Solve() (Solution, error) { var soln solution if err == nil { soln = solution{ - att: s.attempts, + att: s.attempts, + solv: s, } - + soln.analyzerName, soln.analyzerVersion = s.rd.an.Info() soln.hd = s.HashInputs() // Convert ProjectAtoms into LockedProjects @@ -496,7 +543,7 @@ func (s *solver) selectRoot() error { // If we're looking for root's deps, get it from opts and local root // analysis, rather than having the sm do it - deps, err := s.intersectConstraintsWithImports(s.rd.combineConstraints(), s.rd.externalImportList()) + deps, err := s.intersectConstraintsWithImports(s.rd.combineConstraints(), s.rd.externalImportList(s.stdLibFn)) if err != nil { // TODO(sdboyer) this could well happen; handle it with a more graceful error panic(fmt.Sprintf("shouldn't be possible %s", err)) @@ -615,7 +662,7 @@ func (s *solver) intersectConstraintsWithImports(deps []workingConstraint, reach dmap := make(map[ProjectRoot]completeDep) for _, rp := range reach { // If it's a stdlib-shaped package, skip it. - if internal.IsStdLib(rp) { + if s.stdLibFn(rp) { continue } diff --git a/internal/gps/source.go b/internal/gps/source.go index ad06f68033..85cdb23eb4 100644 --- a/internal/gps/source.go +++ b/internal/gps/source.go @@ -246,9 +246,25 @@ func (sg *sourceGateway) exportVersionTo(ctx context.Context, v Version, to stri return err } - return sg.suprvsr.do(ctx, sg.src.upstreamURL(), ctExportTree, func(ctx context.Context) error { + err = sg.suprvsr.do(ctx, sg.src.upstreamURL(), ctExportTree, func(ctx context.Context) error { return sg.src.exportRevisionTo(ctx, r, to) }) + + // It's possible (in git) that we may have tried this against a version that + // doesn't exist in the repository cache, even though we know it exists in + // the upstream. If it looks like that might be the case, update the local + // and retry. + // TODO(sdboyer) It'd be better if we could check the error to see if this + // actually was the cause of the problem. + if err != nil && sg.srcState&sourceHasLatestLocally == 0 { + if _, err = sg.require(ctx, sourceHasLatestLocally); err != nil { + err = sg.suprvsr.do(ctx, sg.src.upstreamURL(), ctExportTree, func(ctx context.Context) error { + return sg.src.exportRevisionTo(ctx, r, to) + }) + } + } + + return err } func (sg *sourceGateway) getManifestAndLock(ctx context.Context, pr ProjectRoot, v Version, an ProjectAnalyzer) (Manifest, Lock, error) { @@ -276,6 +292,27 @@ func (sg *sourceGateway) getManifestAndLock(ctx context.Context, pr ProjectRoot, m, l, err = sg.src.getManifestAndLock(ctx, pr, r, an) return err }) + + // It's possible (in git) that we may have tried this against a version that + // doesn't exist in the repository cache, even though we know it exists in + // the upstream. If it looks like that might be the case, update the local + // and retry. + // TODO(sdboyer) It'd be better if we could check the error to see if this + // actually was the cause of the problem. 
+ if err != nil && sg.srcState&sourceHasLatestLocally == 0 { + // TODO(sdboyer) we should warn/log/something in adaptive recovery + // situations like this + _, err = sg.require(ctx, sourceHasLatestLocally) + if err != nil { + return nil, nil, err + } + + err = sg.suprvsr.do(ctx, label, ctGetManifestAndLock, func(ctx context.Context) error { + m, l, err = sg.src.getManifestAndLock(ctx, pr, r, an) + return err + }) + } + if err != nil { return nil, nil, err } @@ -310,6 +347,27 @@ func (sg *sourceGateway) listPackages(ctx context.Context, pr ProjectRoot, v Ver ptree, err = sg.src.listPackages(ctx, pr, r) return err }) + + // It's possible (in git) that we may have tried this against a version that + // doesn't exist in the repository cache, even though we know it exists in + // the upstream. If it looks like that might be the case, update the local + // and retry. + // TODO(sdboyer) It'd be better if we could check the error to see if this + // actually was the cause of the problem. + if err != nil && sg.srcState&sourceHasLatestLocally == 0 { + // TODO(sdboyer) we should warn/log/something in adaptive recovery + // situations like this + _, err = sg.require(ctx, sourceHasLatestLocally) + if err != nil { + return pkgtree.PackageTree{}, err + } + + err = sg.suprvsr.do(ctx, label, ctGetManifestAndLock, func(ctx context.Context) error { + ptree, err = sg.src.listPackages(ctx, pr, r) + return err + }) + } + if err != nil { return pkgtree.PackageTree{}, err } diff --git a/internal/gps/source_manager.go b/internal/gps/source_manager.go index a7307fafcf..0119fca472 100644 --- a/internal/gps/source_manager.go +++ b/internal/gps/source_manager.go @@ -80,6 +80,11 @@ type ProjectAnalyzer interface { // Perform analysis of the filesystem tree rooted at path, with the // root import path importRoot, to determine the project's constraints, as // indicated by a Manifest and Lock. + // + // Note that an error will typically cause the solver to treat the analyzed + // version as unusable. As such, an error should generally only be returned + // if the code tree is somehow malformed, but not if the implementor's + // expected files containing Manifest and Lock data are merely absent. DeriveManifestAndLock(path string, importRoot ProjectRoot) (Manifest, Lock, error) // Report the name and version of this ProjectAnalyzer. 
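The same recovery dance now appears three times in sourceGateway (exportVersionTo, getManifestAndLock, and listPackages): attempt the operation, and if it fails while the local repository cache may lag the upstream, sync to sourceHasLatestLocally and try once more. A hedged sketch of how that repetition could be factored into a single helper; retryWithLatest is hypothetical and not part of this change, though it uses the same identifiers (sg.srcState, sourceHasLatestLocally, sg.require) that appear above:

// retryWithLatest is a hypothetical helper (not in this diff) showing
// the recovery pattern used by the sourceGateway methods above: run op
// once and, on failure with a possibly stale local cache, refresh the
// cache and retry exactly once.
func (sg *sourceGateway) retryWithLatest(ctx context.Context, op func(context.Context) error) error {
	err := op(ctx)
	if err == nil || sg.srcState&sourceHasLatestLocally != 0 {
		// Either the operation succeeded, or the cache was already
		// current and a retry could not change the outcome.
		return err
	}

	if _, rerr := sg.require(ctx, sourceHasLatestLocally); rerr != nil {
		return rerr
	}
	return op(ctx)
}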
diff --git a/internal/gps/source_test.go b/internal/gps/source_test.go index 986b825142..591ece626a 100644 --- a/internal/gps/source_test.go +++ b/internal/gps/source_test.go @@ -155,7 +155,7 @@ func testSourceGateway(t *testing.T) { t.Fatalf("unexpected err when getting package tree with known rev: %s", err) } if !reflect.DeepEqual(wantptree, ptree) { - t.Fatalf("got incorrect PackageTree:\n\t(GOT): %#v\n\t(WNT): %#v", wantptree, ptree) + t.Fatalf("got incorrect PackageTree:\n\t(GOT): %#v\n\t(WNT): %#v", ptree, wantptree) } ptree, err = sg.listPackages(ctx, ProjectRoot("github.com/sdboyer/deptest"), NewVersion("v1.0.0")) @@ -163,7 +163,7 @@ func testSourceGateway(t *testing.T) { t.Fatalf("unexpected err when getting package tree with unpaired good version: %s", err) } if !reflect.DeepEqual(wantptree, ptree) { - t.Fatalf("got incorrect PackageTree:\n\t(GOT): %#v\n\t(WNT): %#v", wantptree, ptree) + t.Fatalf("got incorrect PackageTree:\n\t(GOT): %#v\n\t(WNT): %#v", ptree, wantptree) } } } diff --git a/internal/gps/typed_radix.go b/internal/gps/typed_radix.go index 2de3f6b196..9c6a9216ac 100644 --- a/internal/gps/typed_radix.go +++ b/internal/gps/typed_radix.go @@ -40,16 +40,6 @@ func (t *deducerTrie) Delete(s string) (pathDeducer, bool) { return nil, false } -// Get is used to lookup a specific key, returning the value and if it was found -func (t *deducerTrie) Get(s string) (pathDeducer, bool) { - t.RLock() - defer t.RUnlock() - if d, has := t.t.Get(s); has { - return d.(pathDeducer), has - } - return nil, false -} - // Insert is used to add a newentry or update an existing entry. Returns if updated. func (t *deducerTrie) Insert(s string, d pathDeducer) (pathDeducer, bool) { t.Lock() @@ -60,13 +50,6 @@ func (t *deducerTrie) Insert(s string, d pathDeducer) (pathDeducer, bool) { return nil, false } -// Len is used to return the number of elements in the tree -func (t *deducerTrie) Len() int { - t.RLock() - defer t.RUnlock() - return t.t.Len() -} - // LongestPrefix is like Get, but instead of an exact match, it will return the // longest prefix match. func (t *deducerTrie) LongestPrefix(s string) (string, pathDeducer, bool) { @@ -78,19 +61,6 @@ func (t *deducerTrie) LongestPrefix(s string) (string, pathDeducer, bool) { return "", nil, false } -// ToMap is used to walk the tree and convert it to a map. -func (t *deducerTrie) ToMap() map[string]pathDeducer { - m := make(map[string]pathDeducer) - t.RLock() - t.t.Walk(func(s string, d interface{}) bool { - m[s] = d.(pathDeducer) - return false - }) - - t.RUnlock() - return m -} - // isPathPrefixOrEqual is an additional helper check to ensure that the literal // string prefix returned from a radix tree prefix match is also a path tree // match. diff --git a/internal/gps/vcs_repo.go b/internal/gps/vcs_repo.go index 10a522ba84..550b4fd43a 100644 --- a/internal/gps/vcs_repo.go +++ b/internal/gps/vcs_repo.go @@ -24,6 +24,40 @@ type ctxRepo interface { //ping(context.Context) (bool, error) } +func newCtxRepo(s vcs.Type, ustr, path string) (r ctxRepo, err error) { + r, err = getVCSRepo(s, ustr, path) + if err != nil { + // if vcs could not initialize the repo due to a local error + // then the local repo is in an incorrect state. Remove and + // treat it as a new not-yet-cloned repo. + + // TODO(marwan-at-work): warn/give progress of the above comment. 
+		os.RemoveAll(path)
+		r, err = getVCSRepo(s, ustr, path)
+	}
+
+	return
+}
+
+func getVCSRepo(s vcs.Type, ustr, path string) (r ctxRepo, err error) {
+	switch s {
+	case vcs.Git:
+		var repo *vcs.GitRepo
+		repo, err = vcs.NewGitRepo(ustr, path)
+		r = &gitRepo{repo}
+	case vcs.Bzr:
+		var repo *vcs.BzrRepo
+		repo, err = vcs.NewBzrRepo(ustr, path)
+		r = &bzrRepo{repo}
+	case vcs.Hg:
+		var repo *vcs.HgRepo
+		repo, err = vcs.NewHgRepo(ustr, path)
+		r = &hgRepo{repo}
+	}
+
+	return
+}
+
 // original implementation of these methods come from
 // https://github.com/Masterminds/vcs

diff --git a/internal/gps/vcs_repo_test.go b/internal/gps/vcs_repo_test.go
index 136fba24c5..c5d6b8b557 100644
--- a/internal/gps/vcs_repo_test.go
+++ b/internal/gps/vcs_repo_test.go
@@ -5,10 +5,14 @@
 package gps

 import (
+	"archive/tar"
+	"compress/gzip"
 	"context"
 	"errors"
+	"io"
 	"io/ioutil"
 	"os"
+	"path/filepath"
 	"testing"
 	"time"

@@ -18,6 +22,8 @@ import (
 // original implementation of these test files come from
 // https://github.com/Masterminds/vcs test files

+const gitRemoteTestRepo = "https://github.com/Masterminds/VCSTestRepo"
+
 func TestErrs(t *testing.T) {
 	err := newVcsLocalErrorOr("", context.Canceled, "")
 	if err != context.Canceled {
@@ -46,6 +52,73 @@ func TestErrs(t *testing.T) {
 	}
 }

+func TestNewCtxRepoHappyPath(t *testing.T) {
+	t.Parallel()
+
+	tempDir, err := ioutil.TempDir("", "go-ctx-repo-happy-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer func() {
+		err = os.RemoveAll(tempDir)
+		if err != nil {
+			t.Error(err)
+		}
+	}()
+
+	_, err = newCtxRepo(vcs.Git, gitRemoteTestRepo, tempDir)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestNewCtxRepoRecovery(t *testing.T) {
+	t.Parallel()
+
+	tempDir, err := ioutil.TempDir("", "go-ctx-repo-recovery-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer func() {
+		err = os.RemoveAll(tempDir)
+		if err != nil {
+			t.Error(err)
+		}
+	}()
+
+	cwd, err := os.Getwd()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	src := filepath.Join(cwd, "_testdata", "badrepo", "corrupt_dot_git_directory.tar")
+	f, err := os.Open(src)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer f.Close()
+
+	dest := filepath.Join(tempDir, ".git")
+	err = untar(dest, f)
+	if err != nil {
+		t.Fatalf("could not untar corrupt repo into temp folder: %v\n", err)
+	}
+
+	_, err = getVCSRepo(vcs.Git, gitRemoteTestRepo, tempDir)
+	if err != nil {
+		if _, ok := err.(*vcs.LocalError); !ok {
+			t.Fatalf("expected a local error but got: %v\n", err)
+		}
+	} else {
+		t.Fatal("expected getVCSRepo to fail when pointing to a corrupt local path. It is possible that vcs.NewGitRepo was updated to gracefully handle this test scenario. Check the return of vcs.NewGitRepo.")
+	}
+
+	_, err = newCtxRepo(vcs.Git, gitRemoteTestRepo, tempDir)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
 func testSvnRepo(t *testing.T) {
 	t.Parallel()

@@ -219,7 +292,7 @@ func testGitRepo(t *testing.T) {
 		}
 	}()

-	rep, err := vcs.NewGitRepo("https://github.com/Masterminds/VCSTestRepo", tempDir+"/VCSTestRepo")
+	rep, err := vcs.NewGitRepo(gitRemoteTestRepo, tempDir+"/VCSTestRepo")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -344,3 +417,46 @@ func testBzrRepo(t *testing.T) {
 		t.Fatalf("Current failed to detect Bzr on rev 2 of branch.
Got version: %s", v) } } + +func untar(dst string, r io.Reader) error { + gzr, err := gzip.NewReader(r) + if err != nil { + return err + } + defer gzr.Close() + + tr := tar.NewReader(gzr) + + for { + header, err := tr.Next() + + switch { + case err == io.EOF: + return nil + case err != nil: + return err + case header == nil: + continue + } + + target := filepath.Join(dst, header.Name) + switch header.Typeflag { + case tar.TypeDir: + if _, err := os.Stat(target); err != nil { + if err := os.MkdirAll(target, 0755); err != nil { + return err + } + } + case tar.TypeReg: + f, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode)) + if err != nil { + return err + } + defer f.Close() + + if _, err := io.Copy(f, tr); err != nil { + return err + } + } + } +} diff --git a/internal/gps/vcs_source.go b/internal/gps/vcs_source.go index 189f5d6982..5533f5e843 100644 --- a/internal/gps/vcs_source.go +++ b/internal/gps/vcs_source.go @@ -15,7 +15,7 @@ import ( "time" "github.com/Masterminds/semver" - "github.com/golang/dep/internal/gps/internal/fs" + "github.com/golang/dep/internal/fs" "github.com/golang/dep/internal/gps/pkgtree" ) @@ -148,7 +148,7 @@ func (s *gitSource) exportRevisionTo(ctx context.Context, rev Revision, to strin // // Sadly, this approach *does* also write out vendor dirs. There doesn't // appear to be a way to make checkout-index respect sparse checkout - // rules (-a supercedes it). The alternative is using plain checkout, + // rules (-a supersedes it). The alternative is using plain checkout, // though we have a bunch of housekeeping to do to set up, then tear // down, the sparse checkout controls, as well as restore the original // index and HEAD. @@ -278,7 +278,8 @@ func (s *gitSource) listVersions(ctx context.Context) (vlist []PairedVersion, er // according to the input URL. type gopkginSource struct { gitSource - major uint64 + major uint64 + unstable bool } func (s *gopkginSource) listVersions(ctx context.Context) ([]PairedVersion, error) { @@ -291,22 +292,20 @@ func (s *gopkginSource) listVersions(ctx context.Context) ([]PairedVersion, erro vlist := make([]PairedVersion, len(ovlist)) k := 0 var dbranch int // index of branch to be marked default - var bsv *semver.Version + var bsv semver.Version for _, v := range ovlist { // all git versions will always be paired pv := v.(versionPair) switch tv := pv.v.(type) { case semVersion: - if tv.sv.Major() == s.major { + if tv.sv.Major() == s.major && !s.unstable { vlist[k] = v k++ } case branchVersion: // The semver lib isn't exactly the same as gopkg.in's logic, but // it's close enough that it's probably fine to use. We can be more - // exact if real problems crop up. The most obvious vector for - // problems is that we totally ignore the "unstable" designation - // right now. + // exact if real problems crop up. sv, err := semver.NewVersion(tv.name) if err != nil || sv.Major() != s.major { // not a semver-shaped branch name at all, or not the same major @@ -314,11 +313,17 @@ func (s *gopkginSource) listVersions(ctx context.Context) ([]PairedVersion, erro continue } + // Gopkg.in has a special "-unstable" suffix which we need to handle + // separately. 
+ if s.unstable != strings.HasSuffix(tv.name, gopkgUnstableSuffix) { + continue + } + // Turn off the default branch marker unconditionally; we can't know // which one to mark as default until we've seen them all tv.isDefault = false // Figure out if this is the current leader for default branch - if bsv == nil || bsv.LessThan(sv) { + if bsv == (semver.Version{}) || bsv.LessThan(sv) { bsv = sv dbranch = k } @@ -331,7 +336,7 @@ func (s *gopkginSource) listVersions(ctx context.Context) ([]PairedVersion, erro } vlist = vlist[:k] - if bsv != nil { + if bsv != (semver.Version{}) { dbv := vlist[dbranch].(versionPair) vlist[dbranch] = branchVersion{ name: dbv.v.(branchVersion).name, diff --git a/internal/gps/vcs_source_test.go b/internal/gps/vcs_source_test.go index e671a28ef4..8fff2d7899 100644 --- a/internal/gps/vcs_source_test.go +++ b/internal/gps/vcs_source_test.go @@ -10,6 +10,7 @@ import ( "net/url" "os/exec" "reflect" + "strings" "sync" "testing" ) @@ -152,10 +153,12 @@ func testGopkginSourceInteractions(t *testing.T) { t.Errorf("URL was bad, lolwut? errtext: %s", err) return } + unstable := strings.HasSuffix(opath, gopkgUnstableSuffix) mb := maybeGopkginSource{ - opath: opath, - url: u, - major: major, + opath: opath, + url: u, + major: major, + unstable: unstable, } ctx := context.Background() @@ -240,7 +243,7 @@ func testGopkginSourceInteractions(t *testing.T) { // simultaneously run for v1, v2, and v3 filters of the target repo wg := &sync.WaitGroup{} - wg.Add(3) + wg.Add(4) go func() { tfunc("gopkg.in/sdboyer/gpkt.v1", "github.com/sdboyer/gpkt", 1, []Version{ NewVersion("v1.1.0").Is(Revision("b2cb48dda625f6640b34d9ffb664533359ac8b91")), @@ -265,6 +268,13 @@ func testGopkginSourceInteractions(t *testing.T) { wg.Done() }() + go func() { + tfunc("github.com/sdboyer/gpkt2.v1-unstable", "github.com/sdboyer/gpkt2", 1, []Version{ + newDefaultBranch("v1-unstable").Is(Revision("24de0be8f4a0b8a44321562117749b257bfcef69")), + }) + wg.Done() + }() + wg.Wait() } diff --git a/internal/gps/version.go b/internal/gps/version.go index 4aa1f40410..060c5b6971 100644 --- a/internal/gps/version.go +++ b/internal/gps/version.go @@ -119,6 +119,10 @@ func (r Revision) String() string { return string(r) } +func (r Revision) ImpliedCaretString() string { + return r.String() +} + func (r Revision) typedString() string { return "r-" + string(r) } @@ -195,6 +199,10 @@ func (v branchVersion) String() string { return string(v.name) } +func (v branchVersion) ImpliedCaretString() string { + return v.String() +} + func (v branchVersion) typedString() string { return fmt.Sprintf("b-%s", v.String()) } @@ -272,6 +280,10 @@ func (v plainVersion) String() string { return string(v) } +func (v plainVersion) ImpliedCaretString() string { + return v.String() +} + func (v plainVersion) typedString() string { return fmt.Sprintf("pv-%s", v.String()) } @@ -344,7 +356,7 @@ func (v plainVersion) Is(r Revision) PairedVersion { } type semVersion struct { - sv *semver.Version + sv semver.Version } func (v semVersion) String() string { @@ -355,6 +367,10 @@ func (v semVersion) String() string { return str } +func (v semVersion) ImpliedCaretString() string { + return v.sv.ImpliedCaretString() +} + func (v semVersion) typedString() string { return fmt.Sprintf("sv-%s", v.String()) } @@ -439,6 +455,10 @@ func (v versionPair) String() string { return v.v.String() } +func (v versionPair) ImpliedCaretString() string { + return v.v.ImpliedCaretString() +} + func (v versionPair) typedString() string { return fmt.Sprintf("%s-%s", 
v.Unpair().typedString(), v.Underlying().typedString()) } diff --git a/internal/gps/version_unifier.go b/internal/gps/version_unifier.go index 7f9dc5d646..d9cfb2a9ef 100644 --- a/internal/gps/version_unifier.go +++ b/internal/gps/version_unifier.go @@ -184,6 +184,13 @@ func (vtu versionTypeUnion) String() string { panic("versionTypeUnion should never be turned into a string; it is solver internal-only") } +// This should generally not be called, but is required for the interface. If it +// is called, we have a bigger problem (the type has escaped the solver); thus, +// panic. +func (vtu versionTypeUnion) ImpliedCaretString() string { + panic("versionTypeUnion should never be turned into a string; it is solver internal-only") +} + func (vtu versionTypeUnion) typedString() string { panic("versionTypeUnion should never be turned into a string; it is solver internal-only") } diff --git a/internal/test/integration_testcase.go b/internal/test/integration_testcase.go index 241612b034..02b1a0c6f0 100644 --- a/internal/test/integration_testcase.go +++ b/internal/test/integration_testcase.go @@ -5,13 +5,11 @@ package test import ( - "bytes" "encoding/json" "flag" "io/ioutil" "os" "path/filepath" - "regexp" "strings" "testing" "unicode" @@ -33,10 +31,12 @@ type IntegrationTestCase struct { GopathInitial map[string]string `json:"gopath-initial"` VendorInitial map[string]string `json:"vendor-initial"` VendorFinal []string `json:"vendor-final"` + InitPath string `json:"init-path"` } -func NewTestCase(t *testing.T, name, wd string) *IntegrationTestCase { - rootPath := filepath.FromSlash(filepath.Join(wd, "testdata", "harness_tests", name)) +// NewTestCase creates a new IntegrationTestCase. +func NewTestCase(t *testing.T, dir, name string) *IntegrationTestCase { + rootPath := filepath.FromSlash(filepath.Join(dir, name)) n := &IntegrationTestCase{ t: t, name: name, @@ -46,47 +46,40 @@ func NewTestCase(t *testing.T, name, wd string) *IntegrationTestCase { } j, err := ioutil.ReadFile(filepath.Join(rootPath, "testcase.json")) if err != nil { - panic(err) + t.Fatal(err) } err = json.Unmarshal(j, n) if err != nil { - panic(err) + t.Fatal(err) } return n } -var jsonNils *regexp.Regexp = regexp.MustCompile(`.*: null,.*\r?\n`) -var jsonCmds *regexp.Regexp = regexp.MustCompile(`(?s) "commands": \[(.*) ],`) -var jsonInds *regexp.Regexp = regexp.MustCompile(`(?s)\s*\n\s*`) +func (tc *IntegrationTestCase) InitialPath() string { + return tc.initialPath +} -// Cleanup writes the resulting TestCase back to the directory, if the -update -// flag is set. During the test, comparisons made to the TestCase should -// write the result back to the TestCase when -update is enabled -func (tc *IntegrationTestCase) Cleanup() { - if *UpdateGolden { - j, err := json.MarshalIndent(tc, "", " ") - if err != nil { - panic(err) +// UpdateFile updates the golden file with the working result. 
+func (tc *IntegrationTestCase) UpdateFile(goldenPath, workingPath string) {
+	exists, working, err := getFile(workingPath)
+	if err != nil {
+		tc.t.Fatalf("Error reading project file %s: %s", workingPath, err)
+	}
+
+	golden := filepath.Join(tc.finalPath, goldenPath)
+	if exists {
+		if err := tc.WriteFile(golden, working); err != nil {
+			tc.t.Fatal(err)
 		}
-		j = jsonNils.ReplaceAll(j, []byte(""))
-		cmds := jsonCmds.FindAllSubmatch(j, -1)[0][1]
-		n := jsonInds.ReplaceAll(cmds, []byte(""))
-		n = bytes.Replace(n, []byte("["), []byte("\n ["), -1)
-		n = bytes.Replace(n, []byte(`","`), []byte(`", "`), -1)
-		n = append(n, '\n')
-		j = bytes.Replace(j, cmds, n, -1)
-		j = append(j, '\n')
-		err = ioutil.WriteFile(filepath.Join(tc.rootPath, "testcase.json"), j, 0666)
-		if err != nil {
-			tc.t.Errorf("Failed to update testcase %s: %s", tc.name, err)
+	} else {
+		err := os.Remove(golden)
+		if err != nil && !os.IsNotExist(err) {
+			tc.t.Fatal(err)
 		}
 	}
 }

-func (tc *IntegrationTestCase) InitialPath() string {
-	return tc.initialPath
-}
-
+// CompareFile compares the golden file with the working result.
 func (tc *IntegrationTestCase) CompareFile(goldenPath, working string) {
 	golden := filepath.Join(tc.finalPath, goldenPath)

@@ -101,31 +94,12 @@
 	if wantExists && gotExists {
 		if want != got {
-			if *UpdateGolden {
-				if err := tc.WriteFile(golden, got); err != nil {
-					tc.t.Fatal(err)
-				}
-			} else {
-				tc.t.Errorf("expected %s, got %s", want, got)
-			}
+			tc.t.Errorf("expected %s, got %s", want, got)
 		}
 	} else if !wantExists && gotExists {
-		if *UpdateGolden {
-			if err := tc.WriteFile(golden, got); err != nil {
-				tc.t.Fatal(err)
-			}
-		} else {
-			tc.t.Errorf("%s created where none was expected", goldenPath)
-		}
+		tc.t.Errorf("%s created where none was expected", goldenPath)
 	} else if wantExists && !gotExists {
-		if *UpdateGolden {
-			err := os.Remove(golden)
-			if err != nil {
-				tc.t.Fatal(err)
-			}
-		} else {
-			tc.t.Errorf("%s not created where one was expected", goldenPath)
-		}
+		tc.t.Errorf("%s not created where one was expected", goldenPath)
 	}
 }

@@ -144,7 +118,7 @@ func (tc *IntegrationTestCase) CompareOutput(stdout string) {
 	stdout = normalizeLines(stdout)

 	if expStr != stdout {
-		tc.t.Errorf("expected: %q but got: %q", expStr, stdout)
+		tc.t.Errorf("(WNT):\n%s\n(GOT):\n%s\n", expStr, stdout)
 	}
 }

@@ -163,8 +137,12 @@ func (tc *IntegrationTestCase) CompareError(err error, stderr string) {
 	gotExists, got := stderr != "" && err != nil, stderr

 	if wantExists && gotExists {
-		if !strings.Contains(got, want) {
+		switch c := strings.Count(got, want); c {
+		case 0:
 			tc.t.Errorf("expected error containing %s, got error %s", want, got)
+		case 1:
+		default:
+			tc.t.Errorf("expected error %s to match once, but it matched %d times in error %s", want, c, got)
 		}
 	} else if !wantExists && gotExists {
 		tc.t.Fatalf("error raised where none was expected: \n%v", stderr)
@@ -183,7 +161,7 @@ func (tc *IntegrationTestCase) CompareVendorPaths(gotVendorPaths []string) {
 	}
 	for ind := range gotVendorPaths {
 		if gotVendorPaths[ind] != wantVendorPaths[ind] {
-			tc.t.Errorf("Mismatch in vendor paths created: want %s got %s", gotVendorPaths, wantVendorPaths)
+			tc.t.Errorf("Mismatch in vendor paths created: want %s got %s", wantVendorPaths, gotVendorPaths)
 		}
 	}
 }
diff --git a/internal/test/integration_testproj.go b/internal/test/integration_testproj.go
index 85fbca8b5d..c6c68db7a4 100644
--- a/internal/test/integration_testproj.go
+++ b/internal/test/integration_testproj.go
@@ -174,12 +174,10 @@ func
(p *IntegrationTestProject) DoRun(args []string) error { if *PrintLogs { if p.stdout.Len() > 0 { - p.t.Log("standard output:") - p.t.Log(p.stdout.String()) + p.t.Logf("\nstandard output:%s", p.stdout.String()) } if p.stderr.Len() > 0 { - p.t.Log("standard error:") - p.t.Log(p.stderr.String()) + p.t.Logf("standard error:\n%s", p.stderr.String()) } } return status diff --git a/internal/test/test.go b/internal/test/test.go index 66a399b4e0..c2c0faa40a 100644 --- a/internal/test/test.go +++ b/internal/test/test.go @@ -77,8 +77,8 @@ func (h *Helper) check(err error) { } } -// parallel runs the test in parallel by calling t.Parallel. -func (h *Helper) parallel() { +// Parallel runs the test in parallel by calling t.Parallel. +func (h *Helper) Parallel() { if h.ran { h.t.Fatalf("%+v", errors.New("internal testsuite error: call to parallel after run")) } diff --git a/lock.go b/lock.go index 27dba4df6b..293f7e195e 100644 --- a/lock.go +++ b/lock.go @@ -15,16 +15,35 @@ import ( "github.com/pkg/errors" ) +// LockName is the lock file name used by dep. const LockName = "Gopkg.lock" +// Lock holds lock file data and implements gps.Lock. type Lock struct { - Memo []byte - P []gps.LockedProject + SolveMeta SolveMeta + P []gps.LockedProject +} + +// SolveMeta holds solver meta data. +type SolveMeta struct { + InputsDigest []byte + AnalyzerName string + AnalyzerVersion int + SolverName string + SolverVersion int } type rawLock struct { - Memo string `toml:"memo"` - Projects []rawLockedProject `toml:"projects"` + SolveMeta solveMeta `toml:"solve-meta"` + Projects []rawLockedProject `toml:"projects"` +} + +type solveMeta struct { + InputsDigest string `toml:"inputs-digest"` + AnalyzerName string `toml:"analyzer-name"` + AnalyzerVersion int `toml:"analyzer-version"` + SolverName string `toml:"solver-name"` + SolverVersion int `toml:"solver-version"` } type rawLockedProject struct { @@ -58,11 +77,16 @@ func fromRawLock(raw rawLock) (*Lock, error) { P: make([]gps.LockedProject, len(raw.Projects)), } - l.Memo, err = hex.DecodeString(raw.Memo) + l.SolveMeta.InputsDigest, err = hex.DecodeString(raw.SolveMeta.InputsDigest) if err != nil { return nil, errors.Errorf("invalid hash digest in lock's memo field") } + l.SolveMeta.AnalyzerName = raw.SolveMeta.AnalyzerName + l.SolveMeta.AnalyzerVersion = raw.SolveMeta.AnalyzerVersion + l.SolveMeta.SolverName = raw.SolveMeta.SolverName + l.SolveMeta.SolverVersion = raw.SolveMeta.SolverVersion + for i, ld := range raw.Projects { r := gps.Revision(ld.Revision) @@ -84,13 +108,16 @@ func fromRawLock(raw rawLock) (*Lock, error) { } l.P[i] = gps.NewLockedProject(id, v, ld.Packages) } + return l, nil } +// InputHash returns the hash of inputs which produced this lock data. func (l *Lock) InputHash() []byte { - return l.Memo + return l.SolveMeta.InputsDigest } +// Projects returns the list of LockedProjects contained in the lock data. 
+// Projects returns the list of LockedProjects contained in the lock data.
 func (l *Lock) Projects() []gps.LockedProject {
 	return l.P
 }
@@ -112,7 +139,13 @@ func (l *Lock) HasProjectWithRoot(root gps.ProjectRoot) bool {
 
 // toRaw converts the lock into a representation suitable to write to the lock file
 func (l *Lock) toRaw() rawLock {
 	raw := rawLock{
-		Memo:     hex.EncodeToString(l.Memo),
+		SolveMeta: solveMeta{
+			InputsDigest:    hex.EncodeToString(l.SolveMeta.InputsDigest),
+			AnalyzerName:    l.SolveMeta.AnalyzerName,
+			AnalyzerVersion: l.SolveMeta.AnalyzerVersion,
+			SolverName:      l.SolveMeta.SolverName,
+			SolverVersion:   l.SolveMeta.SolverVersion,
+		},
 		Projects: make([]rawLockedProject, len(l.P)),
 	}
 
@@ -132,44 +165,40 @@ func (l *Lock) toRaw() rawLock {
 		raw.Projects[k] = ld
 	}
 
-	// TODO sort output - #15
-
 	return raw
 }
 
+// MarshalTOML serializes this lock into TOML via an intermediate raw form.
 func (l *Lock) MarshalTOML() ([]byte, error) {
 	raw := l.toRaw()
 	result, err := toml.Marshal(raw)
 	return result, errors.Wrap(err, "Unable to marshal lock to TOML string")
 }
 
-// LockFromInterface converts an arbitrary gps.Lock to dep's representation of a
-// lock. If the input is already dep's *lock, the input is returned directly.
+// LockFromSolution converts a gps.Solution to dep's representation of a lock.
 //
 // Data is defensively copied wherever necessary to ensure the resulting *lock
 // shares no memory with the original lock.
-//
-// As gps.Solution is a superset of gps.Lock, this can also be used to convert
-// solutions to dep's lock format.
-func LockFromInterface(in gps.Lock) *Lock {
-	if in == nil {
-		return nil
-	} else if l, ok := in.(*Lock); ok {
-		return l
-	}
-
+func LockFromSolution(in gps.Solution) *Lock {
 	h, p := in.InputHash(), in.Projects()
 
 	l := &Lock{
-		Memo: make([]byte, len(h)),
-		P:    make([]gps.LockedProject, len(p)),
+		SolveMeta: SolveMeta{
+			InputsDigest:    make([]byte, len(h)),
+			AnalyzerName:    in.AnalyzerName(),
+			AnalyzerVersion: in.AnalyzerVersion(),
+			SolverName:      in.SolverName(),
+			SolverVersion:   in.SolverVersion(),
+		},
+		P: make([]gps.LockedProject, len(p)),
	}
 
-	copy(l.Memo, h)
+	copy(l.SolveMeta.InputsDigest, h)
 	copy(l.P, p)
 
 	return l
 }
 
+// SortedLockedProjects implements sort.Interface.
type SortedLockedProjects []gps.LockedProject func (s SortedLockedProjects) Len() int { return len(s) } diff --git a/lock_test.go b/lock_test.go index 2f035756b6..c15a1e6a62 100644 --- a/lock_test.go +++ b/lock_test.go @@ -28,7 +28,9 @@ func TestReadLock(t *testing.T) { b, _ := hex.DecodeString("2252a285ab27944a4d7adcba8dbd03980f59ba652f12db39fa93b927c345593e") want := &Lock{ - Memo: b, + SolveMeta: SolveMeta{ + InputsDigest: b, + }, P: []gps.LockedProject{ gps.NewLockedProject( gps.ProjectIdentifier{ProjectRoot: gps.ProjectRoot("github.com/golang/dep/internal/gps")}, @@ -52,7 +54,9 @@ func TestReadLock(t *testing.T) { b, _ = hex.DecodeString("2252a285ab27944a4d7adcba8dbd03980f59ba652f12db39fa93b927c345593e") want = &Lock{ - Memo: b, + SolveMeta: SolveMeta{ + InputsDigest: b, + }, P: []gps.LockedProject{ gps.NewLockedProject( gps.ProjectIdentifier{ProjectRoot: gps.ProjectRoot("github.com/golang/dep/internal/gps")}, @@ -75,7 +79,9 @@ func TestWriteLock(t *testing.T) { want := h.GetTestFileString(golden) memo, _ := hex.DecodeString("2252a285ab27944a4d7adcba8dbd03980f59ba652f12db39fa93b927c345593e") l := &Lock{ - Memo: memo, + SolveMeta: SolveMeta{ + InputsDigest: memo, + }, P: []gps.LockedProject{ gps.NewLockedProject( gps.ProjectIdentifier{ProjectRoot: gps.ProjectRoot("github.com/golang/dep/internal/gps")}, @@ -104,7 +110,9 @@ func TestWriteLock(t *testing.T) { want = h.GetTestFileString(golden) memo, _ = hex.DecodeString("2252a285ab27944a4d7adcba8dbd03980f59ba652f12db39fa93b927c345593e") l = &Lock{ - Memo: memo, + SolveMeta: SolveMeta{ + InputsDigest: memo, + }, P: []gps.LockedProject{ gps.NewLockedProject( gps.ProjectIdentifier{ProjectRoot: gps.ProjectRoot("github.com/golang/dep/internal/gps")}, diff --git a/manifest.go b/manifest.go index c77ac522a6..e0aac79b8b 100644 --- a/manifest.go +++ b/manifest.go @@ -9,6 +9,7 @@ import ( "fmt" "io" "reflect" + "regexp" "sort" "github.com/golang/dep/internal/gps" @@ -16,20 +17,22 @@ import ( "github.com/pkg/errors" ) +// ManifestName is the manifest file name used by dep. const ManifestName = "Gopkg.toml" +// Manifest holds manifest file data and implements gps.RootManifest. type Manifest struct { - Dependencies gps.ProjectConstraints - Ovr gps.ProjectConstraints - Ignored []string - Required []string + Constraints gps.ProjectConstraints + Ovr gps.ProjectConstraints + Ignored []string + Required []string } type rawManifest struct { - Dependencies []rawProject `toml:"dependencies,omitempty"` - Overrides []rawProject `toml:"overrides,omitempty"` - Ignored []string `toml:"ignored,omitempty"` - Required []string `toml:"required,omitempty"` + Constraints []rawProject `toml:"constraint,omitempty"` + Overrides []rawProject `toml:"override,omitempty"` + Ignored []string `toml:"ignored,omitempty"` + Required []string `toml:"required,omitempty"` } type rawProject struct { @@ -50,6 +53,8 @@ func validateManifest(s string) ([]error, error) { // Convert tree to a map manifest := tree.ToMap() + // match abbreviated git hash (7chars) or hg hash (12chars) + abbrevRevHash := regexp.MustCompile("^[a-f0-9]{7}([a-f0-9]{5})?$") // Look for unknown fields and collect errors for prop, val := range manifest { switch prop { @@ -58,7 +63,7 @@ func validateManifest(s string) ([]error, error) { if reflect.TypeOf(val).Kind() != reflect.Map { errs = append(errs, errors.New("metadata should be a TOML table")) } - case "dependencies", "overrides": + case "constraint", "override": // Invalid if type assertion fails. Not a TOML array of tables. 
if rawProj, ok := val.([]interface{}); ok { // Iterate through each array of tables @@ -67,8 +72,14 @@ func validateManifest(s string) ([]error, error) { for key, value := range v.(map[string]interface{}) { // Check if the key is valid switch key { - case "name", "branch", "revision", "version", "source": + case "name", "branch", "version", "source": // valid key + case "revision": + if valueStr, ok := value.(string); ok { + if abbrevRevHash.MatchString(valueStr) { + errs = append(errs, fmt.Errorf("revision %q should not be in abbreviated form", valueStr)) + } + } case "metadata": // Check if metadata is of Map type if reflect.TypeOf(value).Kind() != reflect.Map { @@ -117,21 +128,21 @@ func readManifest(r io.Reader) (*Manifest, []error, error) { func fromRawManifest(raw rawManifest) (*Manifest, error) { m := &Manifest{ - Dependencies: make(gps.ProjectConstraints, len(raw.Dependencies)), - Ovr: make(gps.ProjectConstraints, len(raw.Overrides)), - Ignored: raw.Ignored, - Required: raw.Required, + Constraints: make(gps.ProjectConstraints, len(raw.Constraints)), + Ovr: make(gps.ProjectConstraints, len(raw.Overrides)), + Ignored: raw.Ignored, + Required: raw.Required, } - for i := 0; i < len(raw.Dependencies); i++ { - name, prj, err := toProject(raw.Dependencies[i]) + for i := 0; i < len(raw.Constraints); i++ { + name, prj, err := toProject(raw.Constraints[i]) if err != nil { return nil, err } - if _, exists := m.Dependencies[name]; exists { + if _, exists := m.Constraints[name]; exists { return nil, errors.Errorf("multiple dependencies specified for %s, can only specify one", name) } - m.Dependencies[name] = prj + m.Constraints[name] = prj } for i := 0; i < len(raw.Overrides); i++ { @@ -162,7 +173,7 @@ func toProject(raw rawProject) (n gps.ProjectRoot, pp gps.ProjectProperties, err } // always semver if we can - pp.Constraint, err = gps.NewSemverConstraint(raw.Version) + pp.Constraint, err = gps.NewSemverConstraintIC(raw.Version) if err != nil { // but if not, fall back on plain versions pp.Constraint = gps.NewVersion(raw.Version) @@ -182,15 +193,15 @@ func toProject(raw rawProject) (n gps.ProjectRoot, pp gps.ProjectProperties, err // toRaw converts the manifest into a representation suitable to write to the manifest file func (m *Manifest) toRaw() rawManifest { raw := rawManifest{ - Dependencies: make([]rawProject, 0, len(m.Dependencies)), - Overrides: make([]rawProject, 0, len(m.Ovr)), - Ignored: m.Ignored, - Required: m.Required, + Constraints: make([]rawProject, 0, len(m.Constraints)), + Overrides: make([]rawProject, 0, len(m.Ovr)), + Ignored: m.Ignored, + Required: m.Required, } - for n, prj := range m.Dependencies { - raw.Dependencies = append(raw.Dependencies, toRawProject(n, prj)) + for n, prj := range m.Constraints { + raw.Constraints = append(raw.Constraints, toRawProject(n, prj)) } - sort.Sort(sortedRawProjects(raw.Dependencies)) + sort.Sort(sortedRawProjects(raw.Constraints)) for n, prj := range m.Ovr { raw.Overrides = append(raw.Overrides, toRawProject(n, prj)) @@ -217,6 +228,7 @@ func (s sortedRawProjects) Less(i, j int) bool { return l.Source < r.Source } +// MarshalTOML serializes this manifest into TOML via an intermediate raw form. 
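A standalone illustration (not from the patch) of the abbreviated-revision guard introduced in validateManifest above: the regexp flags 7-character git and 12-character hg short hashes while letting full 40-character revisions through. The sample revisions come from this patch's own tests and fixtures.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as validateManifest: 7 hex chars, optionally extended to 12.
	abbrevRevHash := regexp.MustCompile("^[a-f0-9]{7}([a-f0-9]{5})?$")
	for _, rev := range []string{
		"b86ad16",      // 7 chars (git short hash): flagged
		"8d43f8c0b836", // 12 chars (hg short hash): flagged
		"d05d5aca9f895d19e9265839bffeadd74a2d2ecb", // full revision: allowed
	} {
		fmt.Printf("%-40s abbreviated=%v\n", rev, abbrevRevHash.MatchString(rev))
	}
}

Abbreviated revisions are rejected because short hashes are not stable identifiers: they can become ambiguous as a repository grows.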
func (m *Manifest) MarshalTOML() ([]byte, error) { raw := m.toRaw() result, err := toml.Marshal(raw) @@ -236,7 +248,7 @@ func toRawProject(name gps.ProjectRoot, project gps.ProjectProperties) rawProjec case gps.IsBranch: raw.Branch = v.String() case gps.IsSemver, gps.IsVersion: - raw.Version = v.String() + raw.Version = v.ImpliedCaretString() } return raw } @@ -248,24 +260,28 @@ func toRawProject(name gps.ProjectRoot, project gps.ProjectProperties) rawProjec // if !gps.IsAny(pp.Constraint) && !gps.IsNone(pp.Constraint) { if !gps.IsAny(project.Constraint) && project.Constraint != nil { // Has to be a semver range. - raw.Version = project.Constraint.String() + raw.Version = project.Constraint.ImpliedCaretString() } return raw } +// DependencyConstraints returns a list of project-level constraints. func (m *Manifest) DependencyConstraints() gps.ProjectConstraints { - return m.Dependencies + return m.Constraints } +// TestDependencyConstraints remains unimplemented by returning nil for now. func (m *Manifest) TestDependencyConstraints() gps.ProjectConstraints { // TODO decide whether we're going to incorporate this or not return nil } +// Overrides returns a list of project-level override constraints. func (m *Manifest) Overrides() gps.ProjectConstraints { return m.Ovr } +// IgnoredPackages returns a set of import paths to ignore. func (m *Manifest) IgnoredPackages() map[string]bool { if len(m.Ignored) == 0 { return nil @@ -282,7 +298,7 @@ func (m *Manifest) IgnoredPackages() map[string]bool { // HasConstraintsOn checks if the manifest contains either constraints or // overrides on the provided ProjectRoot. func (m *Manifest) HasConstraintsOn(root gps.ProjectRoot) bool { - if _, has := m.Dependencies[root]; has { + if _, has := m.Constraints[root]; has { return true } if _, has := m.Ovr[root]; has { @@ -292,6 +308,7 @@ func (m *Manifest) HasConstraintsOn(root gps.ProjectRoot) bool { return false } +// RequiredPackages returns a set of import paths to require. 
func (m *Manifest) RequiredPackages() map[string]bool { if len(m.Required) == 0 { return nil diff --git a/manifest_test.go b/manifest_test.go index 3eb87df105..8413c2bf60 100644 --- a/manifest_test.go +++ b/manifest_test.go @@ -25,9 +25,9 @@ func TestReadManifest(t *testing.T) { t.Fatalf("Should have read Manifest correctly, but got err %q", err) } - c, _ := gps.NewSemverConstraint(">=0.12.0, <1.0.0") + c, _ := gps.NewSemverConstraint("^0.12.0") want := Manifest{ - Dependencies: map[gps.ProjectRoot]gps.ProjectProperties{ + Constraints: map[gps.ProjectRoot]gps.ProjectProperties{ gps.ProjectRoot("github.com/golang/dep/internal/gps"): { Constraint: c, }, @@ -44,7 +44,7 @@ func TestReadManifest(t *testing.T) { Ignored: []string{"github.com/foo/bar"}, } - if !reflect.DeepEqual(got.Dependencies, want.Dependencies) { + if !reflect.DeepEqual(got.Constraints, want.Constraints) { t.Error("Valid manifest's dependencies did not parse as expected") } if !reflect.DeepEqual(got.Ovr, want.Ovr) { @@ -61,9 +61,9 @@ func TestWriteManifest(t *testing.T) { golden := "manifest/golden.toml" want := h.GetTestFileString(golden) - c, _ := gps.NewSemverConstraint("^v0.12.0") + c, _ := gps.NewSemverConstraint("^0.12.0") m := &Manifest{ - Dependencies: map[gps.ProjectRoot]gps.ProjectProperties{ + Constraints: map[gps.ProjectRoot]gps.ProjectProperties{ gps.ProjectRoot("github.com/golang/dep/internal/gps"): { Constraint: c, }, @@ -128,7 +128,7 @@ func TestValidateManifest(t *testing.T) { }{ { tomlString: ` - [[dependencies]] + [[constraint]] name = "github.com/foo/bar" `, want: []error{}, @@ -149,7 +149,7 @@ func TestValidateManifest(t *testing.T) { [[bar]] author = "xyz" - [[dependencies]] + [[constraint]] name = "github.com/foo/bar" version = "" `, @@ -163,52 +163,68 @@ func TestValidateManifest(t *testing.T) { tomlString: ` metadata = "project-name" - [[dependencies]] + [[constraint]] name = "github.com/foo/bar" `, want: []error{errors.New("metadata should be a TOML table")}, }, { tomlString: ` - dependencies = "foo" - overrides = "bar" + constraint = "foo" + override = "bar" `, want: []error{ - errors.New("dependencies should be a TOML array of tables"), - errors.New("overrides should be a TOML array of tables"), + errors.New("constraint should be a TOML array of tables"), + errors.New("override should be a TOML array of tables"), }, }, { tomlString: ` - [[dependencies]] + [[constraint]] name = "github.com/foo/bar" location = "some-value" link = "some-other-value" metadata = "foo" - [[overrides]] + [[override]] nick = "foo" `, want: []error{ - errors.New("Invalid key \"location\" in \"dependencies\""), - errors.New("Invalid key \"link\" in \"dependencies\""), - errors.New("Invalid key \"nick\" in \"overrides\""), - errors.New("metadata in \"dependencies\" should be a TOML table"), + errors.New("Invalid key \"location\" in \"constraint\""), + errors.New("Invalid key \"link\" in \"constraint\""), + errors.New("Invalid key \"nick\" in \"override\""), + errors.New("metadata in \"constraint\" should be a TOML table"), }, }, { tomlString: ` - [[dependencies]] + [[constraint]] name = "github.com/foo/bar" - [dependencies.metadata] + [constraint.metadata] color = "blue" `, want: []error{}, }, + { + tomlString: ` + [[constraint]] + name = "github.com/foo/bar" + revision = "b86ad16" + `, + want: []error{errors.New("revision \"b86ad16\" should not be in abbreviated form")}, + }, + { + tomlString: ` + [[constraint]] + name = "foobar.com/hg" + revision = "8d43f8c0b836" + `, + want: []error{errors.New("revision \"8d43f8c0b836\" 
should not be in abbreviated form")}, + }, } - // constains for error + // contains for error contains := func(s []error, e error) bool { for _, a := range s { if a.Error() == e.Error() { diff --git a/project.go b/project.go index 26d965fb16..93a0ad3b1e 100644 --- a/project.go +++ b/project.go @@ -9,6 +9,7 @@ import ( "os" "path/filepath" + "github.com/golang/dep/internal/fs" "github.com/golang/dep/internal/gps" ) @@ -70,22 +71,24 @@ func (p *Project) MakeParams() gps.SolveParameters { // creates a backup of it to a new directory with the provided suffix. func BackupVendor(vpath, suffix string) (string, error) { // Check if there's a non-empty vendor directory - vendorExists, err := IsNonEmptyDir(vpath) + vendorExists, err := fs.IsNonEmptyDir(vpath) if err != nil { return "", err } if vendorExists { - vendorbak := vpath + "-" + suffix + // vpath is a full filepath. We need to split it to prefix the backup dir + // with an "_" + vpathDir, name := filepath.Split(vpath) + vendorbak := filepath.Join(vpathDir, "_"+name+"-"+suffix) // Check if a directory with same name exists if _, err = os.Stat(vendorbak); os.IsNotExist(err) { - // Rename existing vendor to vendor-{suffix} - if err := renameWithFallback(vpath, vendorbak); err != nil { + // Copy existing vendor to vendor-{suffix} + if err := fs.CopyDir(vpath, vendorbak); err != nil { return "", err } return vendorbak, nil - } else { - return "", errVendorBackupFailed } + return "", errVendorBackupFailed } return "", nil diff --git a/project_test.go b/project_test.go index 6a93a6b453..889282c570 100644 --- a/project_test.go +++ b/project_test.go @@ -120,7 +120,7 @@ func TestBackupVendor(t *testing.T) { } // Create a backup - wantName := "vendor-sfx" + wantName := "_vendor-sfx" vendorbak, err := BackupVendor("vendor", "sfx") if err != nil { t.Fatal(err) @@ -138,10 +138,6 @@ func TestBackupVendor(t *testing.T) { t.Fatal(err) } - // Create another vendor directory. Previous vendor moved as backup. 
- os.MkdirAll("vendor", 0777) - pc.CopyFile(dummyFile, "txn_writer/badinput_fileroot") - // Should return error on creating backup with existing filename vendorbak, err = BackupVendor("vendor", "sfx") diff --git a/testdata/analyzer/Gopkg.toml b/testdata/analyzer/Gopkg.toml index f286cc0aae..7edfc9a7a7 100644 --- a/testdata/analyzer/Gopkg.toml +++ b/testdata/analyzer/Gopkg.toml @@ -1,8 +1,8 @@ -[[dependencies]] +[[constraint]] name = "github.com/golang/dep/internal/gps" version = ">=0.12.0, <1.0.0" -[[dependencies]] +[[constraint]] name = "github.com/pkg/errors" version = ">=0.8.0, <1.0.0" diff --git a/testdata/lock/error0.toml b/testdata/lock/error0.toml index 8aca39428c..141d5bc78a 100644 --- a/testdata/lock/error0.toml +++ b/testdata/lock/error0.toml @@ -1,4 +1,5 @@ -memo = "2252a285ab27944a4d7adcba8dbd03980f59ba652f12db39fa93b927c345593e" +[solve-meta] + inputs-digest = "2252a285ab27944a4d7adcba8dbd03980f59ba652f12db39fa93b927c345593e" [[projects]] name = "github.com/golang/dep/internal/gps" diff --git a/testdata/lock/error1.toml b/testdata/lock/error1.toml index 344ed1118b..cf54d870cf 100644 --- a/testdata/lock/error1.toml +++ b/testdata/lock/error1.toml @@ -1,7 +1,9 @@ -memo = "000aaa2a285ab27944a4d7adcba8dbd03980f59ba652f12db39fa93b927c345593e" - [[projects]] name = "github.com/golang/dep/internal/gps" branch = "master" revision = "d05d5aca9f895d19e9265839bffeadd74a2d2ecb" packages = ["."] + +[solve-meta] + inputs-digest = "000aaa2a285ab27944a4d7adcba8dbd03980f59ba652f12db39fa93b927c345593e" + diff --git a/testdata/lock/error2.toml b/testdata/lock/error2.toml index d88d3e437a..788fc8c7c8 100644 --- a/testdata/lock/error2.toml +++ b/testdata/lock/error2.toml @@ -1,5 +1,7 @@ -memo = "2252a285ab27944a4d7adcba8dbd03980f59ba652f12db39fa93b927c345593e" - [[projects]] name = "github.com/golang/dep/internal/gps" packages = ["."] + +[solve-meta] + inputs-digest = "2252a285ab27944a4d7adcba8dbd03980f59ba652f12db39fa93b927c345593e" + diff --git a/testdata/lock/golden0.toml b/testdata/lock/golden0.toml index 35ffb6257c..2011593b48 100644 --- a/testdata/lock/golden0.toml +++ b/testdata/lock/golden0.toml @@ -1,7 +1,13 @@ -memo = "2252a285ab27944a4d7adcba8dbd03980f59ba652f12db39fa93b927c345593e" [[projects]] branch = "master" name = "github.com/golang/dep/internal/gps" packages = ["."] revision = "d05d5aca9f895d19e9265839bffeadd74a2d2ecb" + +[solve-meta] + analyzer-name = "" + analyzer-version = 0 + inputs-digest = "2252a285ab27944a4d7adcba8dbd03980f59ba652f12db39fa93b927c345593e" + solver-name = "" + solver-version = 0 diff --git a/testdata/lock/golden1.toml b/testdata/lock/golden1.toml index dfa34d8859..fe2941c26f 100644 --- a/testdata/lock/golden1.toml +++ b/testdata/lock/golden1.toml @@ -1,7 +1,13 @@ -memo = "2252a285ab27944a4d7adcba8dbd03980f59ba652f12db39fa93b927c345593e" [[projects]] name = "github.com/golang/dep/internal/gps" packages = ["."] revision = "d05d5aca9f895d19e9265839bffeadd74a2d2ecb" version = "0.12.2" + +[solve-meta] + analyzer-name = "" + analyzer-version = 0 + inputs-digest = "2252a285ab27944a4d7adcba8dbd03980f59ba652f12db39fa93b927c345593e" + solver-name = "" + solver-version = 0 diff --git a/testdata/manifest/error1.toml b/testdata/manifest/error1.toml index a8336c1ed4..f0c731eb95 100644 --- a/testdata/manifest/error1.toml +++ b/testdata/manifest/error1.toml @@ -1,13 +1,13 @@ ignored = ["github.com/foo/bar"] -[[dependencies]] +[[constraint]] name = "github.com/golang/dep/internal/gps" branch = "master" revision = "d05d5aca9f895d19e9265839bffeadd74a2d2ecb" version = 
"^v0.12.0" source = "https://github.com/golang/dep/internal/gps" -[[overrides]] +[[override]] name = "github.com/golang/dep/internal/gps" branch = "master" revision = "d05d5aca9f895d19e9265839bffeadd74a2d2ecb" diff --git a/testdata/manifest/error2.toml b/testdata/manifest/error2.toml index f46a0f8855..d0d799d496 100644 --- a/testdata/manifest/error2.toml +++ b/testdata/manifest/error2.toml @@ -1,9 +1,9 @@ ignored = ["github.com/foo/bar"] -[[dependencies]] +[[constraint]] name = "github.com/golang/dep/internal/gps" branch = "master" -[[dependencies]] +[[constraint]] name = "github.com/golang/dep/internal/gps" branch = "master" diff --git a/testdata/manifest/golden.toml b/testdata/manifest/golden.toml index 98b7cd3e09..8614f1fdd7 100644 --- a/testdata/manifest/golden.toml +++ b/testdata/manifest/golden.toml @@ -1,14 +1,14 @@ ignored = ["github.com/foo/bar"] -[[dependencies]] +[[constraint]] name = "github.com/babble/brook" revision = "d05d5aca9f895d19e9265839bffeadd74a2d2ecb" -[[dependencies]] +[[constraint]] name = "github.com/golang/dep/internal/gps" - version = ">=0.12.0, <1.0.0" + version = "0.12.0" -[[overrides]] +[[override]] branch = "master" name = "github.com/golang/dep/internal/gps" source = "https://github.com/golang/dep/internal/gps" diff --git a/testdata/txn_writer/expected_lock.toml b/testdata/txn_writer/expected_lock.toml index bda7ec14cb..8c9310fd3b 100644 --- a/testdata/txn_writer/expected_lock.toml +++ b/testdata/txn_writer/expected_lock.toml @@ -1,7 +1,15 @@ -memo = "595716d270828e763c811ef79c9c41f85b1d1bfbdfe85280036405c03772206c" +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + [[projects]] name = "github.com/sdboyer/dep-test" packages = ["."] revision = "2a3a211e171803acb82d1d5d42ceb53228f51751" version = "1.0.0" + +[solve-meta] + analyzer-name = "" + analyzer-version = 0 + inputs-digest = "595716d270828e763c811ef79c9c41f85b1d1bfbdfe85280036405c03772206c" + solver-name = "" + solver-version = 0 diff --git a/testdata/txn_writer/expected_manifest.toml b/testdata/txn_writer/expected_manifest.toml index e0e080df27..4d8b903e5c 100644 --- a/testdata/txn_writer/expected_manifest.toml +++ b/testdata/txn_writer/expected_manifest.toml @@ -1,6 +1,13 @@ ## Gopkg.toml example (these lines may be deleted) +## "metadata" defines metadata about the project that could be used by other independent +## systems. The metadata defined here will be ignored by dep. +# [metadata] +# key1 = "value that convey data to other systems" +# system1-data = "value that is used by a system" +# system2-data = "value that is used by another system" + ## "required" lists a set of packages (not projects) that must be included in ## Gopkg.lock. This list is merged with the set of packages imported by the current ## project. Use it when your project needs a package it doesn't explicitly import - @@ -12,9 +19,10 @@ ## or in a dependency. # ignored = ["github.com/user/project/badpkg"] -## Dependencies define constraints on dependent projects. They are respected by +## Constraints are rules for how directly imported projects +## may be incorporated into the depgraph. They are respected by ## dep whether coming from the Gopkg.toml of the current project or a dependency. -# [[dependencies]] +# [[constraint]] ## Required: the root import path of the project being constrained. # name = "github.com/user/project" # @@ -26,18 +34,25 @@ # ## Optional: an alternate location (URL or import path) for the project's source. 
# source = "https://github.com/myfork/package.git" +# +## "metadata" defines metadata about the dependency or override that could be used +## by other independent systems. The metadata defined here will be ignored by dep. +# [metadata] +# key1 = "value that convey data to other systems" +# system1-data = "value that is used by a system" +# system2-data = "value that is used by another system" -## Overrides have the same structure as [[dependencies]], but supercede all -## [[dependencies]] declarations from all projects. Only the current project's -## [[overrides]] are applied. +## Overrides have the same structure as [[constraint]], but supersede all +## [[constraint]] declarations from all projects. Only [[override]] from +## the current project's are applied. ## ## Overrides are a sledgehammer. Use them only as a last resort. -# [[overrides]] +# [[override]] ## Required: the root import path of the project being constrained. # name = "github.com/user/project" # ## Optional: specifying a version constraint override will cause all other -## constraints on this project to be ignored; only the overriden constraint +## constraints on this project to be ignored; only the overridden constraint ## need be satisfied. ## Again, only one of "branch", "version" or "revision" can be specified. # version = "1.0.0" @@ -51,6 +66,6 @@ -[[dependencies]] +[[constraint]] name = "github.com/sdboyer/dep-test" version = "1.0.0" diff --git a/testdata/txn_writer/original_lock.toml b/testdata/txn_writer/original_lock.toml index 1a9384efa7..2651064873 100644 --- a/testdata/txn_writer/original_lock.toml +++ b/testdata/txn_writer/original_lock.toml @@ -1,4 +1,5 @@ -memo = "595716d270828e763c811ef79c9c41f85b1d1bfbdfe85280036405c03772206c" +[solve-meta] + inputs-digest = "595716d270828e763c811ef79c9c41f85b1d1bfbdfe85280036405c03772206c" [[projects]] name = "github.com/foo/bar" diff --git a/testdata/txn_writer/updated_lock.toml b/testdata/txn_writer/updated_lock.toml index da44aa9a21..81ae83ba34 100644 --- a/testdata/txn_writer/updated_lock.toml +++ b/testdata/txn_writer/updated_lock.toml @@ -1,4 +1,5 @@ -memo = "2252a285ab27944a4d7adcba8dbd03980f59ba652f12db39fa93b927c345593e" +[solve-meta] + inputs-digest = "2252a285ab27944a4d7adcba8dbd03980f59ba652f12db39fa93b927c345593e" [[projects]] name = "github.com/foo/bar" diff --git a/txn_writer.go b/txn_writer.go index 510b58cc3b..342b1c5116 100644 --- a/txn_writer.go +++ b/txn_writer.go @@ -14,6 +14,7 @@ import ( "sort" "strings" + "github.com/golang/dep/internal/fs" "github.com/golang/dep/internal/gps" "github.com/pelletier/go-toml" "github.com/pkg/errors" @@ -22,9 +23,16 @@ import ( // Example string to be written to the manifest file // if no dependencies are found in the project // during `dep init` -const exampleTOML = ` +var exampleTOML = []byte(` ## Gopkg.toml example (these lines may be deleted) +## "metadata" defines metadata about the project that could be used by other independent +## systems. The metadata defined here will be ignored by dep. +# [metadata] +# key1 = "value that convey data to other systems" +# system1-data = "value that is used by a system" +# system2-data = "value that is used by another system" + ## "required" lists a set of packages (not projects) that must be included in ## Gopkg.lock. This list is merged with the set of packages imported by the current ## project. Use it when your project needs a package it doesn't explicitly import - @@ -36,9 +44,10 @@ const exampleTOML = ` ## or in a dependency. 
# ignored = ["github.com/user/project/badpkg"] -## Dependencies define constraints on dependent projects. They are respected by +## Constraints are rules for how directly imported projects +## may be incorporated into the depgraph. They are respected by ## dep whether coming from the Gopkg.toml of the current project or a dependency. -# [[dependencies]] +# [[constraint]] ## Required: the root import path of the project being constrained. # name = "github.com/user/project" # @@ -50,18 +59,25 @@ const exampleTOML = ` # ## Optional: an alternate location (URL or import path) for the project's source. # source = "https://github.com/myfork/package.git" - -## Overrides have the same structure as [[dependencies]], but supercede all -## [[dependencies]] declarations from all projects. Only the current project's -## [[overrides]] are applied. +# +## "metadata" defines metadata about the dependency or override that could be used +## by other independent systems. The metadata defined here will be ignored by dep. +# [metadata] +# key1 = "value that convey data to other systems" +# system1-data = "value that is used by a system" +# system2-data = "value that is used by another system" + +## Overrides have the same structure as [[constraint]], but supersede all +## [[constraint]] declarations from all projects. Only [[override]] from +## the current project's are applied. ## ## Overrides are a sledgehammer. Use them only as a last resort. -# [[overrides]] +# [[override]] ## Required: the root import path of the project being constrained. # name = "github.com/user/project" # ## Optional: specifying a version constraint override will cause all other -## constraints on this project to be ignored; only the overriden constraint +## constraints on this project to be ignored; only the overridden constraint ## need be satisfied. ## Again, only one of "branch", "version" or "revision" can be specified. # version = "1.0.0" @@ -74,7 +90,12 @@ const exampleTOML = ` # source = "https://github.com/myfork/package.git" -` +`) + +// String added on top of lock file +var lockFileComment = []byte(`# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + +`) // SafeWriter transactionalizes writes of manifest, lock, and vendor dir, both // individually and in any combination, into a pseudo-atomic action with @@ -84,43 +105,47 @@ const exampleTOML = ` // guard against non-arcane failure conditions. type SafeWriter struct { Manifest *Manifest - Lock *Lock - LockDiff *gps.LockDiff - WriteVendor bool + lock *Lock + lockDiff *gps.LockDiff + writeVendor bool } // NewSafeWriter sets up a SafeWriter to write a set of config yaml, lock and vendor tree. // // - If manifest is provided, it will be written to the standard manifest file -// name beneath root. +// name beneath root. +// // - If newLock is provided, it will be written to the standard lock file -// name beneath root. +// name beneath root. +// // - If vendor is VendorAlways, or is VendorOnChanged and the locks are different, -// the vendor directory will be written beneath root based on newLock. +// the vendor directory will be written beneath root based on newLock. +// // - If oldLock is provided without newLock, error. +// // - If vendor is VendorAlways without a newLock, error. 
func NewSafeWriter(manifest *Manifest, oldLock, newLock *Lock, vendor VendorBehavior) (*SafeWriter, error) { sw := &SafeWriter{ Manifest: manifest, - Lock: newLock, + lock: newLock, } if oldLock != nil { if newLock == nil { return nil, errors.New("must provide newLock when oldLock is specified") } - sw.LockDiff = gps.DiffLocks(oldLock, newLock) + sw.lockDiff = gps.DiffLocks(oldLock, newLock) } switch vendor { case VendorAlways: - sw.WriteVendor = true + sw.writeVendor = true case VendorOnChanged: - if sw.LockDiff != nil || (newLock != nil && oldLock == nil) { - sw.WriteVendor = true + if sw.lockDiff != nil || (newLock != nil && oldLock == nil) { + sw.writeVendor = true } } - if sw.WriteVendor && newLock == nil { + if sw.writeVendor && newLock == nil { return nil, errors.New("must provide newLock in order to write out vendor") } @@ -129,7 +154,7 @@ func NewSafeWriter(manifest *Manifest, oldLock, newLock *Lock, vendor VendorBeha // HasLock checks if a Lock is present in the SafeWriter func (sw *SafeWriter) HasLock() bool { - return sw.Lock != nil + return sw.lock != nil } // HasManifest checks if a Manifest is present in the SafeWriter @@ -137,23 +162,15 @@ func (sw *SafeWriter) HasManifest() bool { return sw.Manifest != nil } -// HasVendor returns the if SafeWriter should write to vendor -func (sw *SafeWriter) HasVendor() bool { - return sw.WriteVendor -} - type rawStringDiff struct { *gps.StringDiff } +// MarshalTOML serializes the diff as a string. func (diff rawStringDiff) MarshalTOML() ([]byte, error) { return []byte(diff.String()), nil } -type rawLockDiff struct { - *gps.LockDiff -} - type rawLockedProjectDiff struct { Name gps.ProjectRoot `toml:"name"` Source *rawStringDiff `toml:"source,omitempty"` @@ -262,14 +279,14 @@ func (sw SafeWriter) validate(root string, sm gps.SourceManager) error { if root == "" { return errors.New("root path must be non-empty") } - if is, err := IsDir(root); !is { + if is, err := fs.IsDir(root); !is { if err != nil { return err } return errors.Errorf("root path %q does not exist", root) } - if sw.HasVendor() && sm == nil { + if sw.writeVendor && sm == nil { return errors.New("must provide a SourceManager if writing out a vendor dir") } @@ -284,13 +301,13 @@ func (sw SafeWriter) validate(root string, sm gps.SourceManager) error { // operations succeeded. It also does its best to roll back if any moves fail. // This mostly guarantees that dep cannot exit with a partial write that would // leave an undefined state on disk. 
-func (sw *SafeWriter) Write(root string, sm gps.SourceManager, noExamples bool) error { +func (sw *SafeWriter) Write(root string, sm gps.SourceManager, examples bool) error { err := sw.validate(root, sm) if err != nil { return err } - if !sw.HasManifest() && !sw.HasLock() && !sw.HasVendor() { + if !sw.HasManifest() && !sw.HasLock() && !sw.writeVendor { // nothing to do return nil } @@ -312,28 +329,31 @@ func (sw *SafeWriter) Write(root string, sm gps.SourceManager, noExamples bool) return errors.Wrap(err, "failed to marshal manifest to TOML") } - var initOutput string + var initOutput []byte - // If examples are NOT disabled, use the example text - if !noExamples { + // If examples are enabled, use the example text + if examples { initOutput = exampleTOML } - // 0666 is before umask; mirrors behavior of os.Create (used by - // writeFile()) - if err = ioutil.WriteFile(filepath.Join(td, ManifestName), append([]byte(initOutput), tb...), 0666); err != nil { + if err = ioutil.WriteFile(filepath.Join(td, ManifestName), append(initOutput, tb...), 0666); err != nil { return errors.Wrap(err, "failed to write manifest file to temp dir") } } if sw.HasLock() { - if err := writeFile(filepath.Join(td, LockName), sw.Lock); err != nil { + l, err := sw.lock.MarshalTOML() + if err != nil { + return errors.Wrap(err, "failed to marshal lock to TOML") + } + + if err = ioutil.WriteFile(filepath.Join(td, LockName), append(lockFileComment, l...), 0666); err != nil { return errors.Wrap(err, "failed to write lock file to temp dir") } } - if sw.HasVendor() { - err = gps.WriteDepTree(filepath.Join(td, "vendor"), sw.Lock, sm, true) + if sw.writeVendor { + err = gps.WriteDepTree(filepath.Join(td, "vendor"), sw.lock, sm, true) if err != nil { return errors.Wrap(err, "error while writing out vendor tree") } @@ -341,7 +361,7 @@ func (sw *SafeWriter) Write(root string, sm gps.SourceManager, noExamples bool) // Ensure vendor/.git is preserved if present if hasDotGit(vpath) { - err = renameWithFallback(filepath.Join(vpath, ".git"), filepath.Join(td, "vendor/.git")) + err = fs.RenameWithFallback(filepath.Join(vpath, ".git"), filepath.Join(td, "vendor/.git")) if _, ok := err.(*os.LinkError); ok { return errors.Wrap(err, "failed to preserve vendor/.git") } @@ -360,7 +380,7 @@ func (sw *SafeWriter) Write(root string, sm gps.SourceManager, noExamples bool) if _, err := os.Stat(mpath); err == nil { // Move out the old one. tmploc := filepath.Join(td, ManifestName+".orig") - failerr = renameWithFallback(mpath, tmploc) + failerr = fs.RenameWithFallback(mpath, tmploc) if failerr != nil { goto fail } @@ -368,7 +388,7 @@ func (sw *SafeWriter) Write(root string, sm gps.SourceManager, noExamples bool) } // Move in the new one. - failerr = renameWithFallback(filepath.Join(td, ManifestName), mpath) + failerr = fs.RenameWithFallback(filepath.Join(td, ManifestName), mpath) if failerr != nil { goto fail } @@ -379,7 +399,7 @@ func (sw *SafeWriter) Write(root string, sm gps.SourceManager, noExamples bool) // Move out the old one. tmploc := filepath.Join(td, LockName+".orig") - failerr = renameWithFallback(lpath, tmploc) + failerr = fs.RenameWithFallback(lpath, tmploc) if failerr != nil { goto fail } @@ -387,13 +407,13 @@ func (sw *SafeWriter) Write(root string, sm gps.SourceManager, noExamples bool) } // Move in the new one. 
- failerr = renameWithFallback(filepath.Join(td, LockName), lpath) + failerr = fs.RenameWithFallback(filepath.Join(td, LockName), lpath) if failerr != nil { goto fail } } - if sw.HasVendor() { + if sw.writeVendor { if _, err := os.Stat(vpath); err == nil { // Move out the old vendor dir. just do it into an adjacent dir, to // try to mitigate the possibility of a pointless cross-filesystem @@ -405,7 +425,7 @@ func (sw *SafeWriter) Write(root string, sm gps.SourceManager, noExamples bool) vendorbak = filepath.Join(td, "vendor.orig") } - failerr = renameWithFallback(vpath, vendorbak) + failerr = fs.RenameWithFallback(vpath, vendorbak) if failerr != nil { goto fail } @@ -413,7 +433,7 @@ func (sw *SafeWriter) Write(root string, sm gps.SourceManager, noExamples bool) } // Move in the new one. - failerr = renameWithFallback(filepath.Join(td, "vendor"), vpath) + failerr = fs.RenameWithFallback(filepath.Join(td, "vendor"), vpath) if failerr != nil { goto fail } @@ -421,7 +441,7 @@ func (sw *SafeWriter) Write(root string, sm gps.SourceManager, noExamples bool) // Renames all went smoothly. The deferred os.RemoveAll will get the temp // dir, but if we wrote vendor, we have to clean that up directly - if sw.HasVendor() { + if sw.writeVendor { // Nothing we can really do about an error at this point, so ignore it os.RemoveAll(vendorbak) } @@ -432,11 +452,12 @@ fail: // If we failed at any point, move all the things back into place, then bail. for _, pair := range restore { // Nothing we can do on err here, as we're already in recovery mode. - renameWithFallback(pair.from, pair.to) + fs.RenameWithFallback(pair.from, pair.to) } return failerr } +// PrintPreparedActions logs the actions a call to Write would perform. func (sw *SafeWriter) PrintPreparedActions(output *log.Logger) error { if sw.HasManifest() { output.Printf("Would have written the following %s:\n", ManifestName) @@ -448,16 +469,16 @@ func (sw *SafeWriter) PrintPreparedActions(output *log.Logger) error { } if sw.HasLock() { - if sw.LockDiff == nil { + if sw.lockDiff == nil { output.Printf("Would have written the following %s:\n", LockName) - l, err := sw.Lock.MarshalTOML() + l, err := sw.lock.MarshalTOML() if err != nil { return errors.Wrap(err, "ensure DryRun cannot serialize lock") } output.Println(string(l)) } else { output.Printf("Would have written the following changes to %s:\n", LockName) - diff, err := formatLockDiff(*sw.LockDiff) + diff, err := formatLockDiff(*sw.lockDiff) if err != nil { return errors.Wrap(err, "ensure DryRun cannot serialize the lock diff") } @@ -465,9 +486,9 @@ func (sw *SafeWriter) PrintPreparedActions(output *log.Logger) error { } } - if sw.HasVendor() { + if sw.writeVendor { output.Println("Would have written the following projects to the vendor directory:") - for _, project := range sw.Lock.Projects() { + for _, project := range sw.lock.Projects() { prj := project.Ident() rev, _, _ := gps.VersionComponentStrings(project.Version()) if prj.Source == "" { @@ -481,7 +502,8 @@ func (sw *SafeWriter) PrintPreparedActions(output *log.Logger) error { return nil } -func PruneProject(p *Project, sm gps.SourceManager) error { +// PruneProject removes unused packages from a project. 
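Ahead of the PruneProject change below: the function now takes a *log.Logger, and passing nil silences the new "Calculating prune" progress output. A small illustrative wrapper (pruneWithLogging is invented for this sketch; PruneProject's signature is from the patch).

package dep

import (
	"log"
	"os"

	"github.com/golang/dep/internal/gps"
)

// pruneWithLogging prunes unused vendored packages, optionally narrating the
// per-directory decisions to stderr.
func pruneWithLogging(p *Project, sm gps.SourceManager, verbose bool) error {
	var logger *log.Logger // nil keeps the prune silent
	if verbose {
		logger = log.New(os.Stderr, "", 0)
	}
	return PruneProject(p, sm, logger)
}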
+func PruneProject(p *Project, sm gps.SourceManager, logger *log.Logger) error { td, err := ioutil.TempDir(os.TempDir(), "dep") if err != nil { return errors.Wrap(err, "error while creating temp dir for writing manifest/lock/vendor") @@ -500,11 +522,22 @@ func PruneProject(p *Project, sm gps.SourceManager) error { } } - toDelete, err := calculatePrune(td, toKeep) + toDelete, err := calculatePrune(td, toKeep, logger) if err != nil { return err } + if logger != nil { + if len(toDelete) > 0 { + logger.Println("Calculated the following directories to prune:") + for _, d := range toDelete { + logger.Printf(" %s\n", d) + } + } else { + logger.Println("No directories found to prune") + } + } + if err := deleteDirs(toDelete); err != nil { return err } @@ -521,14 +554,14 @@ func PruneProject(p *Project, sm gps.SourceManager) error { // to a proper tempdir. vendorbak = filepath.Join(td, "vendor.orig") } - failerr = renameWithFallback(vpath, vendorbak) + failerr = fs.RenameWithFallback(vpath, vendorbak) if failerr != nil { goto fail } } // Move in the new one. - failerr = renameWithFallback(td, vpath) + failerr = fs.RenameWithFallback(td, vpath) if failerr != nil { goto fail } @@ -538,11 +571,14 @@ func PruneProject(p *Project, sm gps.SourceManager) error { return nil fail: - renameWithFallback(vendorbak, vpath) + fs.RenameWithFallback(vendorbak, vpath) return failerr } -func calculatePrune(vendorDir string, keep []string) ([]string, error) { +func calculatePrune(vendorDir string, keep []string, logger *log.Logger) ([]string, error) { + if logger != nil { + logger.Println("Calculating prune. Checking the following packages:") + } sort.Strings(keep) toDelete := []string{} err := filepath.Walk(vendorDir, func(path string, info os.FileInfo, err error) error { @@ -557,6 +593,9 @@ func calculatePrune(vendorDir string, keep []string) ([]string, error) { } name := strings.TrimPrefix(path, vendorDir+"/") + if logger != nil { + logger.Printf(" %s", name) + } i := sort.Search(len(keep), func(i int) bool { return name <= keep[i] }) diff --git a/txn_writer_test.go b/txn_writer_test.go index ecf0168f29..c0f0b9187a 100644 --- a/txn_writer_test.go +++ b/txn_writer_test.go @@ -26,7 +26,7 @@ func TestSafeWriter_BadInput_MissingRoot(t *testing.T) { defer pc.Release() sw, _ := NewSafeWriter(nil, nil, nil, VendorOnChanged) - err := sw.Write("", pc.SourceManager, false) + err := sw.Write("", pc.SourceManager, true) if err == nil { t.Fatal("should have errored without a root path, but did not") @@ -44,7 +44,7 @@ func TestSafeWriter_BadInput_MissingSourceManager(t *testing.T) { pc.Load() sw, _ := NewSafeWriter(nil, nil, pc.Project.Lock, VendorAlways) - err := sw.Write(pc.Project.AbsRoot, nil, false) + err := sw.Write(pc.Project.AbsRoot, nil, true) if err == nil { t.Fatal("should have errored without a source manager when forceVendor is true, but did not") @@ -92,7 +92,7 @@ func TestSafeWriter_BadInput_NonexistentRoot(t *testing.T) { sw, _ := NewSafeWriter(nil, nil, nil, VendorOnChanged) missingroot := filepath.Join(pc.Project.AbsRoot, "nonexistent") - err := sw.Write(missingroot, pc.SourceManager, false) + err := sw.Write(missingroot, pc.SourceManager, true) if err == nil { t.Fatal("should have errored with nonexistent dir for root path, but did not") @@ -110,7 +110,7 @@ func TestSafeWriter_BadInput_RootIsFile(t *testing.T) { sw, _ := NewSafeWriter(nil, nil, nil, VendorOnChanged) fileroot := pc.CopyFile("fileroot", "txn_writer/badinput_fileroot") - err := sw.Write(fileroot, pc.SourceManager, false) + err := 
sw.Write(fileroot, pc.SourceManager, true) if err == nil { t.Fatal("should have errored when root path is a file, but did not") @@ -140,12 +140,12 @@ func TestSafeWriter_Manifest(t *testing.T) { if sw.HasLock() { t.Fatal("Did not expect the payload to contain the lock") } - if sw.HasVendor() { + if sw.writeVendor { t.Fatal("Did not expect the payload to contain the vendor directory") } // Write changes - err := sw.Write(pc.Project.AbsRoot, pc.SourceManager, false) + err := sw.Write(pc.Project.AbsRoot, pc.SourceManager, true) h.Must(errors.Wrap(err, "SafeWriter.Write failed")) // Verify file system changes @@ -182,12 +182,12 @@ func TestSafeWriter_ManifestAndUnmodifiedLock(t *testing.T) { if !sw.HasLock() { t.Fatal("Expected the payload to contain the lock.") } - if sw.HasVendor() { + if sw.writeVendor { t.Fatal("Did not expect the payload to contain the vendor directory") } // Write changes - err := sw.Write(pc.Project.AbsRoot, pc.SourceManager, false) + err := sw.Write(pc.Project.AbsRoot, pc.SourceManager, true) h.Must(errors.Wrap(err, "SafeWriter.Write failed")) // Verify file system changes @@ -224,12 +224,12 @@ func TestSafeWriter_ManifestAndUnmodifiedLockWithForceVendor(t *testing.T) { if !sw.HasLock() { t.Fatal("Expected the payload to contain the lock") } - if !sw.HasVendor() { + if !sw.writeVendor { t.Fatal("Expected the payload to contain the vendor directory") } // Write changes - err := sw.Write(pc.Project.AbsRoot, pc.SourceManager, false) + err := sw.Write(pc.Project.AbsRoot, pc.SourceManager, true) h.Must(errors.Wrap(err, "SafeWriter.Write failed")) // Verify file system changes @@ -261,22 +261,22 @@ func TestSafeWriter_ModifiedLock(t *testing.T) { originalLock := new(Lock) *originalLock = *pc.Project.Lock - originalLock.Memo = []byte{} // zero out the input hash to ensure non-equivalency + originalLock.SolveMeta.InputsDigest = []byte{} // zero out the input hash to ensure non-equivalency sw, _ := NewSafeWriter(nil, originalLock, pc.Project.Lock, VendorOnChanged) // Verify prepared actions if sw.HasManifest() { - t.Fatal("Did not expect the payload to contain the manifest") + t.Fatal("Did not expect the manifest to be written") } if !sw.HasLock() { - t.Fatal("Expected the payload to contain the lock") + t.Fatal("Expected that the writer should plan to write the lock") } - if !sw.HasVendor() { - t.Fatal("Expected the payload to contain the vendor directory") + if !sw.writeVendor { + t.Fatal("Expected that the writer should plan to write the vendor directory") } // Write changes - err := sw.Write(pc.Project.AbsRoot, pc.SourceManager, false) + err := sw.Write(pc.Project.AbsRoot, pc.SourceManager, true) h.Must(errors.Wrap(err, "SafeWriter.Write failed")) // Verify file system changes @@ -308,7 +308,7 @@ func TestSafeWriter_ModifiedLockSkipVendor(t *testing.T) { originalLock := new(Lock) *originalLock = *pc.Project.Lock - originalLock.Memo = []byte{} // zero out the input hash to ensure non-equivalency + originalLock.SolveMeta.InputsDigest = []byte{} // zero out the input hash to ensure non-equivalency sw, _ := NewSafeWriter(nil, originalLock, pc.Project.Lock, VendorNever) // Verify prepared actions @@ -318,12 +318,12 @@ func TestSafeWriter_ModifiedLockSkipVendor(t *testing.T) { if !sw.HasLock() { t.Fatal("Expected the payload to contain the lock") } - if sw.HasVendor() { + if sw.writeVendor { t.Fatal("Did not expect the payload to contain the vendor directory") } // Write changes - err := sw.Write(pc.Project.AbsRoot, pc.SourceManager, false) + err := sw.Write(pc.Project.AbsRoot, 
pc.SourceManager, true) h.Must(errors.Wrap(err, "SafeWriter.Write failed")) // Verify file system changes @@ -351,7 +351,7 @@ func TestSafeWriter_ForceVendorWhenVendorAlreadyExists(t *testing.T) { pc.Load() sw, _ := NewSafeWriter(nil, pc.Project.Lock, pc.Project.Lock, VendorAlways) - err := sw.Write(pc.Project.AbsRoot, pc.SourceManager, false) + err := sw.Write(pc.Project.AbsRoot, pc.SourceManager, true) h.Must(errors.Wrap(err, "SafeWriter.Write failed")) // Verify prepared actions @@ -362,11 +362,11 @@ func TestSafeWriter_ForceVendorWhenVendorAlreadyExists(t *testing.T) { if !sw.HasLock() { t.Fatal("Expected the payload to contain the lock") } - if !sw.HasVendor() { + if !sw.writeVendor { t.Fatal("Expected the payload to contain the vendor directory ") } - err = sw.Write(pc.Project.AbsRoot, pc.SourceManager, false) + err = sw.Write(pc.Project.AbsRoot, pc.SourceManager, true) h.Must(errors.Wrap(err, "SafeWriter.Write failed")) // Verify file system changes @@ -408,12 +408,12 @@ func TestSafeWriter_NewLock(t *testing.T) { if !sw.HasLock() { t.Fatal("Expected the payload to contain the lock") } - if !sw.HasVendor() { + if !sw.writeVendor { t.Fatal("Expected the payload to contain the vendor directory") } // Write changes - err = sw.Write(pc.Project.AbsRoot, pc.SourceManager, false) + err = sw.Write(pc.Project.AbsRoot, pc.SourceManager, true) h.Must(errors.Wrap(err, "SafeWriter.Write failed")) // Verify file system changes @@ -452,12 +452,12 @@ func TestSafeWriter_NewLockSkipVendor(t *testing.T) { if !sw.HasLock() { t.Fatal("Expected the payload to contain the lock") } - if sw.HasVendor() { + if sw.writeVendor { t.Fatal("Did not expect the payload to contain the vendor directory") } // Write changes - err = sw.Write(pc.Project.AbsRoot, pc.SourceManager, false) + err = sw.Write(pc.Project.AbsRoot, pc.SourceManager, true) h.Must(errors.Wrap(err, "SafeWriter.Write failed")) // Verify file system changes @@ -492,7 +492,7 @@ func TestSafeWriter_DiffLocks(t *testing.T) { sw, _ := NewSafeWriter(nil, pc.Project.Lock, updatedLock, VendorOnChanged) // Verify lock diff - diff := sw.LockDiff + diff := sw.lockDiff if diff == nil { t.Fatal("Expected the payload to contain a diff of the lock files") } @@ -543,11 +543,11 @@ func TestSafeWriter_VendorDotGitPreservedWithForceVendor(t *testing.T) { if !sw.HasLock() { t.Fatal("Expected the payload to contain the lock") } - if !sw.HasVendor() { + if !sw.writeVendor { t.Fatal("Expected the payload to contain the vendor directory") } - err := sw.Write(pc.Project.AbsRoot, pc.SourceManager, false) + err := sw.Write(pc.Project.AbsRoot, pc.SourceManager, true) h.Must(errors.Wrap(err, "SafeWriter.Write failed")) // Verify file system changes diff --git a/vendor/github.com/Masterminds/semver/.travis.yml b/vendor/github.com/Masterminds/semver/.travis.yml index 3231840778..fa92a5a326 100644 --- a/vendor/github.com/Masterminds/semver/.travis.yml +++ b/vendor/github.com/Masterminds/semver/.travis.yml @@ -1,7 +1,6 @@ language: go go: - - 1.5 - 1.6 - 1.7 - tip @@ -13,8 +12,8 @@ go: sudo: false script: - - GO15VENDOREXPERIMENT=1 make setup - - GO15VENDOREXPERIMENT=1 make test + - make setup + - make test notifications: webhooks: diff --git a/vendor/github.com/Masterminds/semver/collection.go b/vendor/github.com/Masterminds/semver/collection.go index a78235895f..459fbe0e3d 100644 --- a/vendor/github.com/Masterminds/semver/collection.go +++ b/vendor/github.com/Masterminds/semver/collection.go @@ -3,7 +3,7 @@ package semver // Collection is a collection of Version instances and 
implements the sort
 // interface. See the sort package for more details.
 // https://golang.org/pkg/sort/
-type Collection []*Version
+type Collection []Version
 
 // Len returns the length of a collection. The number of Version instances
 // on the slice.
diff --git a/vendor/github.com/Masterminds/semver/collection_test.go b/vendor/github.com/Masterminds/semver/collection_test.go
index 71b909c4e0..a1d745f476 100644
--- a/vendor/github.com/Masterminds/semver/collection_test.go
+++ b/vendor/github.com/Masterminds/semver/collection_test.go
@@ -15,7 +15,7 @@ func TestCollection(t *testing.T) {
 		"0.4.2",
 	}
 
-	vs := make([]*Version, len(raw))
+	vs := make([]Version, len(raw))
 	for i, r := range raw {
 		v, err := NewVersion(r)
 		if err != nil {
diff --git a/vendor/github.com/Masterminds/semver/constraints.go b/vendor/github.com/Masterminds/semver/constraints.go
index bf2f500380..628a44a161 100644
--- a/vendor/github.com/Masterminds/semver/constraints.go
+++ b/vendor/github.com/Masterminds/semver/constraints.go
@@ -46,15 +46,26 @@ func init() {
 		cvRegex, cvRegex))
 }
 
+// Constraint is the interface that wraps checking a semantic version against
+// one or more constraints to find a match.
 type Constraint interface {
-	// Constraints compose the fmt.Stringer interface. Printing a constraint
-	// will yield a string that, if passed to NewConstraint(), will produce the
-	// original constraint. (Bidirectional serialization)
+	// Constraints compose the fmt.Stringer interface. This method is the
+	// bijective inverse of NewConstraint(): if a string yielded from this
+	// method is passed to NewConstraint(), a byte-identical instance of the
+	// original Constraint will be returned.
 	fmt.Stringer
 
+	// ImpliedCaretString converts the Constraint to a string in the same manner
+	// as String(), but treats the empty operator as equivalent to ^, rather
+	// than =.
+	//
+	// In the same way that String() is the inverse of NewConstraint(), this
+	// method is the inverse of NewConstraintIC().
+	ImpliedCaretString() string
+
 	// Matches checks that a version satisfies the constraint. If it does not,
 	// an error is returned indicating the problem; if it does, the error is nil.
-	Matches(v *Version) error
+	Matches(v Version) error
 
 	// Intersect computes the intersection between the receiving Constraint and
 	// passed Constraint, and returns a new Constraint representing the result.
@@ -84,9 +95,10 @@ type realConstraint interface {
 	_real()
 }
 
-// Controls whether or not parsed constraints are cached
+// CacheConstraints controls whether or not parsed constraints are cached
 var CacheConstraints = true
 var constraintCache = make(map[string]ccache)
+var constraintCacheIC = make(map[string]ccache)
 var constraintCacheLock sync.RWMutex
 
 type ccache struct {
@@ -102,9 +114,19 @@ type ccache struct {
 // If an invalid constraint string is passed, more information is provided in
 // the returned error string.
 func NewConstraint(in string) (Constraint, error) {
+	return newConstraint(in, false, constraintCache)
+}
+
+// NewConstraintIC (ImpliedConstraint) is the same as NewConstraint, except that
+// it treats an absent operator as being equivalent to ^ instead of =.
+func NewConstraintIC(in string) (Constraint, error) { + return newConstraint(in, true, constraintCacheIC) +} + +func newConstraint(in string, ic bool, cache map[string]ccache) (Constraint, error) { if CacheConstraints { constraintCacheLock.RLock() - if final, exists := constraintCache[in]; exists { + if final, exists := cache[in]; exists { constraintCacheLock.RUnlock() return final.c, final.err } @@ -120,11 +142,11 @@ func NewConstraint(in string) (Constraint, error) { cs := strings.Split(v, ",") result := make([]Constraint, len(cs)) for i, s := range cs { - pc, err := parseConstraint(s) + pc, err := parseConstraint(s, ic) if err != nil { if CacheConstraints { constraintCacheLock.Lock() - constraintCache[in] = ccache{err: err} + cache[in] = ccache{err: err} constraintCacheLock.Unlock() } return nil, err @@ -139,7 +161,7 @@ func NewConstraint(in string) (Constraint, error) { if CacheConstraints { constraintCacheLock.Lock() - constraintCache[in] = ccache{c: final} + cache[in] = ccache{c: final} constraintCacheLock.Unlock() } @@ -201,10 +223,8 @@ func Union(cg ...Constraint) Constraint { return c case none: continue - case *Version: - //if tc != nil { + case Version: //heap.Push(&real, tc) - //} real = append(real, tc) case rangeConstraint: //heap.Push(&real, tc) @@ -233,9 +253,9 @@ func Union(cg ...Constraint) Constraint { last := nuc[len(nuc)-1] switch lt := last.(type) { - case *Version: + case Version: switch ct := c.(type) { - case *Version: + case Version: // Two versions in a row; only append if they're not equal if !lt.Equal(ct) { nuc = append(nuc, ct) @@ -257,7 +277,7 @@ func Union(cg ...Constraint) Constraint { } case rangeConstraint: switch ct := c.(type) { - case *Version: + case Version: // Last was range, current is version. constraintList sort invariants guarantee // that the version will be greater than the min, so we have to // determine if the version is less than the max. 
If it is, we diff --git a/vendor/github.com/Masterminds/semver/constraints_test.go b/vendor/github.com/Masterminds/semver/constraints_test.go index df85e82412..a45714d746 100644 --- a/vendor/github.com/Masterminds/semver/constraints_test.go +++ b/vendor/github.com/Masterminds/semver/constraints_test.go @@ -11,28 +11,34 @@ func TestParseConstraint(t *testing.T) { {"*", Any(), false}, {">= 1.2", rangeConstraint{ min: newV(1, 2, 0), + max: Version{special: infiniteVersion}, includeMin: true, }, false}, {"1.0", newV(1, 0, 0), false}, {"foo", nil, true}, {"<= 1.2", rangeConstraint{ + min: Version{special: zeroVersion}, max: newV(1, 2, 0), includeMax: true, }, false}, {"=< 1.2", rangeConstraint{ + min: Version{special: zeroVersion}, max: newV(1, 2, 0), includeMax: true, }, false}, {"=> 1.2", rangeConstraint{ min: newV(1, 2, 0), + max: Version{special: infiniteVersion}, includeMin: true, }, false}, {"v1.2", newV(1, 2, 0), false}, {"=1.5", newV(1, 5, 0), false}, {"> 1.3", rangeConstraint{ min: newV(1, 3, 0), + max: Version{special: infiniteVersion}, }, false}, {"< 1.4.1", rangeConstraint{ + min: Version{special: zeroVersion}, max: newV(1, 4, 1), }, false}, {"~1.1.0", rangeConstraint{ @@ -50,7 +56,7 @@ func TestParseConstraint(t *testing.T) { } for _, tc := range tests { - c, err := parseConstraint(tc.in) + c, err := parseConstraint(tc.in, false) if tc.err && err == nil { t.Errorf("Expected error for %s didn't occur", tc.in) } else if !tc.err && err != nil { @@ -81,8 +87,8 @@ func constraintEq(c1, c2 Constraint) bool { return false } return true - case *Version: - if tc2, ok := c2.(*Version); ok { + case Version: + if tc2, ok := c2.(Version); ok { return tc1.Equal(tc2) } return false @@ -136,8 +142,8 @@ func constraintEq(c1, c2 Constraint) bool { } // newV is a helper to create a new Version object. 
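The *Version-to-Version switch above ripples through the whole vendored fork: versions are now plain values. A sketch of sorting with the value-based Collection, assuming this vendored fork's NewVersion returns a Version value (as the updated collection_test.go suggests); upstream Masterminds/semver at the time still used pointers.

package main

import (
	"fmt"
	"sort"

	"github.com/Masterminds/semver"
)

func main() {
	raw := []string{"1.2.3", "1.0.0", "0.4.2"}
	vs := make([]semver.Version, len(raw))
	for i, r := range raw {
		v, err := semver.NewVersion(r)
		if err != nil {
			panic(err)
		}
		vs[i] = v // copied by value; no shared pointers to mutate
	}
	sort.Sort(semver.Collection(vs))
	for _, v := range vs {
		fmt.Println(v.String()) // 0.4.2, 1.0.0, 1.2.3
	}
}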
-func newV(major, minor, patch uint64) *Version { - return &Version{ +func newV(major, minor, patch uint64) Version { + return Version{ major: major, minor: minor, patch: patch, @@ -177,18 +183,22 @@ func TestConstraintCheck(t *testing.T) { {">2.x", "3.0.0", true}, {">2.x", "2.9.9", false}, {">2.x", "1.9.9", false}, - // TODO these are all pending the changes in #10 - //{"<=2.x-beta1", "3.0.0-alpha2", false}, - //{">2.x-beta1", "3.0.0-alpha2", true}, - //{"<2.0.0", "2.0.0-alpha1", false}, - //{"<=2.0.0", "2.0.0-alpha1", true}, + {"<=2.x-alpha2", "3.0.0-alpha3", false}, + {"<=2.0.0", "2.0.0-alpha1", false}, + {">2.x-beta1", "3.0.0-alpha2", false}, + {"^2.0.0", "3.0.0-alpha2", false}, + {"^2.0.0", "2.0.0-alpha1", false}, + {"^2.1.0-alpha1", "2.1.0-alpha2", true}, // allow prerelease match within same major/minor/patch + {"^2.1.0-alpha1", "2.1.1-alpha2", false}, // but ONLY within same major/minor/patch + {"^2.1.0-alpha3", "2.1.0-alpha2", false}, // still respect prerelease ordering + {"^2.0.0", "2.0.0-alpha2", false}, // and only if the min has a prerelease } for _, tc := range tests { if testing.Verbose() { t.Logf("Testing if %q allows %q", tc.constraint, tc.version) } - c, err := parseConstraint(tc.constraint) + c, err := parseConstraint(tc.constraint, false) if err != nil { t.Errorf("err: %s", err) continue @@ -219,6 +229,7 @@ func TestNewConstraint(t *testing.T) { }{ {">= 1.1", rangeConstraint{ min: newV(1, 1, 0), + max: Version{special: infiniteVersion}, includeMin: true, }, false}, {"2.0", newV(2, 0, 0), false}, @@ -267,14 +278,17 @@ func TestNewConstraint(t *testing.T) { includeMax: false, }, false}, {"!=1.4.0", rangeConstraint{ - excl: []*Version{ + min: Version{special: zeroVersion}, + max: Version{special: infiniteVersion}, + excl: []Version{ newV(1, 4, 0), }, }, false}, {">=1.1.0, !=1.4.0", rangeConstraint{ min: newV(1, 1, 0), + max: Version{special: infiniteVersion}, includeMin: true, - excl: []*Version{ + excl: []Version{ newV(1, 4, 0), }, }, false}, @@ -299,6 +313,45 @@ func TestNewConstraint(t *testing.T) { } } +func TestNewConstraintIC(t *testing.T) { + tests := []struct { + input string + c Constraint + err bool + }{ + {"=2.0", newV(2, 0, 0), false}, + {"= 2.0", newV(2, 0, 0), false}, + {"1.1.0", rangeConstraint{ + min: newV(1, 1, 0), + max: newV(2, 0, 0), + includeMin: true, + }, false}, + {"1.1", rangeConstraint{ + min: newV(1, 1, 0), + max: newV(2, 0, 0), + includeMin: true, + }, false}, + } + + for _, tc := range tests { + c, err := NewConstraintIC(tc.input) + if tc.err && err == nil { + t.Errorf("expected but did not get error for: %s", tc.input) + continue + } else if !tc.err && err != nil { + t.Errorf("unexpectederror for input %s: %s", tc.input, err) + continue + } + if tc.err { + continue + } + + if !constraintEq(tc.c, c) { + t.Errorf("%q produced constraint %q, but expected %q", tc.input, c, tc.c) + } + } +} + func TestConstraintsCheck(t *testing.T) { tests := []struct { constraint string @@ -306,9 +359,13 @@ func TestConstraintsCheck(t *testing.T) { check bool }{ {"*", "1.2.3", true}, - {"~0.0.0", "1.2.3", false}, // npm allows this weird thing, but we don't + {"~0.0.0", "1.2.3", false}, + {"0.x.x", "1.2.3", false}, + {"0.0.x", "1.2.3", false}, {"~0.0.0", "0.1.9", false}, {"~0.0.0", "0.0.9", true}, + {"^0.0.0", "0.0.9", true}, + {"^0.0.0", "0.1.9", false}, // caret behaves like tilde below 1.0.0 {"= 2.0", "1.2.3", false}, {"= 2.0", "2.0.0", true}, {"4.1", "4.1.0", true}, @@ -398,7 +455,6 @@ func TestBidirectionalSerialization(t *testing.T) { }{ {"*", true}, // any 
{"~0.0.0", false}, // tildes expand into ranges - {"^2.0", false}, // carets expand into ranges {"=2.0", false}, // abbreviated versions print as full {"4.1.x", false}, // wildcards expand into ranges {">= 1.1.0", false}, // does not produce spaces on ranges @@ -415,8 +471,8 @@ func TestBidirectionalSerialization(t *testing.T) { {">1.1.1, <1.2.0", true}, // no unary op on gt min {">1.1.7, <=2.0.0", true}, // no unary op on gt min and lte max {">1.1.7, <=2.0.0", true}, // no unary op on gt min and lte max - {">=0.1.7, <1.0.0", true}, // carat shifting below 1.0.0 - {">=0.1.7, <0.3.0", true}, // carat shifting width below 1.0.0 + {">=0.1.7, <1.0.0", true}, // caret shifting below 1.0.0 + {">=0.1.7, <0.3.0", true}, // caret shifting width below 1.0.0 } for _, fix := range tests { @@ -436,11 +492,38 @@ func TestBidirectionalSerialization(t *testing.T) { } } +func TestBidirectionalSerializationIC(t *testing.T) { + tests := []struct { + io string + eq bool + }{ + {"*", true}, // any + {"=2.0.0", true}, // versions retain leading = + {"2.0.0", true}, // (no) caret in, (no) caret out + } + + for _, fix := range tests { + c, err := NewConstraintIC(fix.io) + if err != nil { + t.Errorf("Valid constraint string produced unexpected error: %s", err) + } + + eq := fix.io == c.ImpliedCaretString() + if eq != fix.eq { + if eq { + t.Errorf("Constraint %q should not have reproduced input string %q, but did", c, fix.io) + } else { + t.Errorf("Constraint should have reproduced input string %q, but instead produced %q", fix.io, c) + } + } + } +} + func TestPreferUnaryOpForm(t *testing.T) { tests := []struct { in, out string }{ - {">=0.1.7, <0.2.0", "^0.1.7"}, // carat shifting below 1.0.0 + {">=0.1.7, <0.2.0", "^0.1.7"}, // caret shifting below 1.0.0 {">=1.1.0, <2.0.0", "^1.1.0"}, {">=1.1.0, <2.0.0, !=1.2.3", "^1.1.0, !=1.2.3"}, } @@ -530,10 +613,12 @@ func TestIsSuperset(t *testing.T) { max: newV(2, 1, 0), }, { + min: Version{special: zeroVersion}, max: newV(1, 10, 0), }, { min: newV(2, 0, 0), + max: Version{special: infiniteVersion}, }, { min: newV(1, 2, 0), @@ -604,7 +689,7 @@ func TestIsSuperset(t *testing.T) { // isSupersetOf ignores excludes, so even though this would make rc[1] not a // superset of rc[0] anymore, it should still say it is. - rc[1].excl = []*Version{ + rc[1].excl = []Version{ newV(1, 5, 0), } diff --git a/vendor/github.com/Masterminds/semver/error.go b/vendor/github.com/Masterminds/semver/error.go index 4fb73456e9..9eb33b39d1 100644 --- a/vendor/github.com/Masterminds/semver/error.go +++ b/vendor/github.com/Masterminds/semver/error.go @@ -11,6 +11,7 @@ var rangeErrs = [...]string{ "%s is greater than the maximum of %s", "%s is greater than or equal to the maximum of %s", "%s is specifically disallowed in %s", + "%s has prerelease data, so is omitted by the range %s", } const ( @@ -19,15 +20,21 @@ const ( rerrGT rerrGTE rerrNE + rerrPre ) +// MatchFailure is an interface for failures to find a Constraint match. type MatchFailure interface { error - Pair() (v *Version, c Constraint) + + // Pair returns the version and constraint that did not match, prompting + // the error. + Pair() (v Version, c Constraint) } +// RangeMatchFailure occurs when a version is not within a constraint range.
type RangeMatchFailure struct { - v *Version + v Version rc rangeConstraint typ int8 } @@ -36,22 +43,29 @@ func (rce RangeMatchFailure) Error() string { return fmt.Sprintf(rangeErrs[rce.typ], rce.v, rce.rc) } -func (rce RangeMatchFailure) Pair() (v *Version, r Constraint) { +// Pair returns the version and constraint that did not match. Part of the +// MatchFailure interface. +func (rce RangeMatchFailure) Pair() (v Version, r Constraint) { return rce.v, rce.rc } +// VersionMatchFailure occurs when two versions do not match each other. type VersionMatchFailure struct { - v, other *Version + v, other Version } func (vce VersionMatchFailure) Error() string { return fmt.Sprintf("%s is not equal to %s", vce.v, vce.other) } -func (vce VersionMatchFailure) Pair() (v *Version, r Constraint) { +// Pair returns the two versions that did not match. Part of the +// MatchFailure interface. +func (vce VersionMatchFailure) Pair() (v Version, r Constraint) { return vce.v, vce.other } +// MultiMatchFailure collects the match failures that occur when a version is +// checked against multiple constraints. type MultiMatchFailure []MatchFailure func (mmf MultiMatchFailure) Error() string { diff --git a/vendor/github.com/Masterminds/semver/magic.go b/vendor/github.com/Masterminds/semver/magic.go index 9a8d353a85..7eee64f14a 100644 --- a/vendor/github.com/Masterminds/semver/magic.go +++ b/vendor/github.com/Masterminds/semver/magic.go @@ -2,7 +2,7 @@ package semver import "errors" -var noneErr = errors.New("The 'None' constraint admits no versions.") +var errNone = errors.New("The 'None' constraint admits no versions.") // Any is a constraint that is satisfied by any valid semantic version. type any struct{} @@ -16,9 +16,13 @@ func (any) String() string { return "*" } +func (any) ImpliedCaretString() string { + return "*" +} + // Matches checks that a version satisfies the constraint. As all versions // satisfy Any, this always returns nil. -func (any) Matches(v *Version) error { +func (any) Matches(v Version) error { return nil } @@ -59,10 +63,14 @@ func (none) String() string { return "" } +func (none) ImpliedCaretString() string { + return "" +} + // Matches checks that a version satisfies the constraint. As no version can // satisfy None, this always fails (returns an error). -func (none) Matches(v *Version) error { - return noneErr +func (none) Matches(v Version) error { + return errNone } // Intersect computes the intersection between two constraints. diff --git a/vendor/github.com/Masterminds/semver/parse.go b/vendor/github.com/Masterminds/semver/parse.go index a6e6a97d00..d6afa6c907 100644 --- a/vendor/github.com/Masterminds/semver/parse.go +++ b/vendor/github.com/Masterminds/semver/parse.go @@ -20,7 +20,7 @@ func rewriteRange(i string) string { return o } -func parseConstraint(c string) (Constraint, error) { +func parseConstraint(c string, cbd bool) (Constraint, error) { m := constraintRegex.FindStringSubmatch(c) if m == nil { return nil, fmt.Errorf("Malformed constraint: %s", c) @@ -49,6 +49,16 @@ func parseConstraint(c string) (Constraint, error) { return nil, errors.New("constraint Parser Error") } + // We never want to keep the "original" data in a constraint, and keeping it + // around can disrupt simple equality comparisons. So, strip it out. + v.original = "" + + // If the caret-by-default flag is on and there's no operator, convert the + // operator to a caret.
+ if cbd && m[1] == "" { + m[1] = "^" + } + switch m[1] { case "^": // Caret always expands to a range @@ -81,11 +91,13 @@ func parseConstraint(c string) (Constraint, error) { } } -func expandCaret(v *Version) Constraint { - maxv := &Version{ - major: v.major + 1, - minor: 0, - patch: 0, +func expandCaret(v Version) Constraint { + var maxv Version + // Caret behaves like tilde below 1.0.0 + if v.major == 0 { + maxv.minor = v.minor + 1 + } else { + maxv.major = v.major + 1 } return rangeConstraint{ @@ -96,13 +108,13 @@ func expandCaret(v *Version) Constraint { } } -func expandTilde(v *Version, wildMinor bool) Constraint { +func expandTilde(v Version, wildMinor bool) Constraint { if wildMinor { // When minor is wild on a tilde, behavior is same as caret return expandCaret(v) } - maxv := &Version{ + maxv := Version{ major: v.major, minor: v.minor + 1, patch: 0, @@ -122,23 +134,26 @@ func expandTilde(v *Version, wildMinor bool) Constraint { // (which is how we represent a disjoint set). If there are no wildcards, it // will expand to a rangeConstraint with no min or max, but having the one // exception. -func expandNeq(v *Version, wildMinor, wildPatch bool) Constraint { +func expandNeq(v Version, wildMinor, wildPatch bool) Constraint { if !(wildMinor || wildPatch) { return rangeConstraint{ - excl: []*Version{v}, + min: Version{special: zeroVersion}, + max: Version{special: infiniteVersion}, + excl: []Version{v}, } } // Create the low range with no min, and the max as the floor admitted by // the wildcard lr := rangeConstraint{ + min: Version{special: zeroVersion}, max: v, includeMax: false, } // The high range uses the derived version (bumped depending on where the // wildcards were) as the min, and is inclusive - minv := &Version{ + minv := Version{ major: v.major, minor: v.minor, patch: v.patch, @@ -152,16 +167,17 @@ func expandNeq(v *Version, wildMinor, wildPatch bool) Constraint { hr := rangeConstraint{ min: minv, + max: Version{special: infiniteVersion}, includeMin: true, } return Union(lr, hr) } -func expandGreater(v *Version, wildMinor, wildPatch, eq bool) Constraint { +func expandGreater(v Version, wildMinor, wildPatch, eq bool) Constraint { if (wildMinor || wildPatch) && !eq { // wildcards negate the meaning of prerelease and other info - v = &Version{ + v = Version{ major: v.major, minor: v.minor, patch: v.patch, @@ -176,20 +192,22 @@ func expandGreater(v *Version, wildMinor, wildPatch, eq bool) Constraint { } return rangeConstraint{ min: v, + max: Version{special: infiniteVersion}, includeMin: true, } } return rangeConstraint{ min: v, + max: Version{special: infiniteVersion}, includeMin: eq, } } -func expandLess(v *Version, wildMinor, wildPatch, eq bool) Constraint { +func expandLess(v Version, wildMinor, wildPatch, eq bool) Constraint { if eq && (wildMinor || wildPatch) { // wildcards negate the meaning of prerelease and other info - v = &Version{ + v = Version{ major: v.major, minor: v.minor, patch: v.patch, @@ -200,12 +218,14 @@ func expandLess(v *Version, wildMinor, wildPatch, eq bool) Constraint { v.minor++ } return rangeConstraint{ + min: Version{special: zeroVersion}, max: v, includeMax: false, } } return rangeConstraint{ + min: Version{special: zeroVersion}, max: v, includeMax: eq, } diff --git a/vendor/github.com/Masterminds/semver/range.go b/vendor/github.com/Masterminds/semver/range.go index 5509711742..bcfdfcf9a4 100644 --- a/vendor/github.com/Masterminds/semver/range.go +++ b/vendor/github.com/Masterminds/semver/range.go @@ -7,13 +7,14 @@ import ( ) type rangeConstraint 
struct { - min, max *Version + min, max Version includeMin, includeMax bool - excl []*Version + excl []Version } -func (rc rangeConstraint) Matches(v *Version) error { +func (rc rangeConstraint) Matches(v Version) error { var fail bool + ispre := v.Prerelease() != "" rce := RangeMatchFailure{ v: v, @@ -21,8 +22,6 @@ func (rc rangeConstraint) Matches(v *Version) error { } if !rc.minIsZero() { - // TODO ensure sane handling of prerelease versions (which are strictly - // less than the normal version, but should be admitted in a geq range) cmp := rc.min.Compare(v) if rc.includeMin { rce.typ = rerrLT @@ -38,8 +37,6 @@ func (rc rangeConstraint) Matches(v *Version) error { } if !rc.maxIsInf() { - // TODO ensure sane handling of prerelease versions (which are strictly - // less than the normal version, but should be admitted in a geq range) cmp := rc.max.Compare(v) if rc.includeMax { rce.typ = rerrGT @@ -47,6 +44,7 @@ func (rc rangeConstraint) Matches(v *Version) error { } else { rce.typ = rerrGTE fail = cmp != 1 + } if fail { @@ -61,6 +59,14 @@ func (rc rangeConstraint) Matches(v *Version) error { } } + // If the incoming version has prerelease info, it's usually a match failure + // - unless all the numeric parts are equal between the incoming and the + // minimum. + if !fail && ispre && !numPartsEq(rc.min, v) { + rce.typ = rerrPre + return rce + } + return nil } @@ -70,8 +76,8 @@ func (rc rangeConstraint) dup() rangeConstraint { return rc } - var excl []*Version - excl = make([]*Version, len(rc.excl)) + var excl []Version + excl = make([]Version, len(rc.excl)) copy(excl, rc.excl) return rangeConstraint{ @@ -84,11 +90,11 @@ func (rc rangeConstraint) dup() rangeConstraint { } func (rc rangeConstraint) minIsZero() bool { - return rc.min == nil + return rc.min.special == zeroVersion } func (rc rangeConstraint) maxIsInf() bool { - return rc.max == nil + return rc.max.special == infiniteVersion } func (rc rangeConstraint) Intersect(c Constraint) Constraint { @@ -99,12 +105,11 @@ func (rc rangeConstraint) Intersect(c Constraint) Constraint { return None() case unionConstraint: return oc.Intersect(rc) - case *Version: + case Version: if err := rc.Matches(oc); err != nil { return None() - } else { - return c } + return c case rangeConstraint: nr := rangeConstraint{ min: rc.min, @@ -174,7 +179,7 @@ func (rc rangeConstraint) Union(c Constraint) Constraint { return rc case unionConstraint: return Union(rc, oc) - case *Version: + case Version: if err := rc.Matches(oc); err == nil { return rc } else if len(rc.excl) > 0 { // TODO (re)checking like this is wasteful @@ -182,7 +187,7 @@ func (rc rangeConstraint) Union(c Constraint) Constraint { // it and return that for k, e := range rc.excl { if e.Equal(oc) { - excl := make([]*Version, len(rc.excl)-1) + excl := make([]Version, len(rc.excl)-1) if k == len(rc.excl)-1 { copy(excl, rc.excl[:k]) @@ -204,12 +209,12 @@ func (rc rangeConstraint) Union(c Constraint) Constraint { if oc.LessThan(rc.min) { return unionConstraint{oc, rc.dup()} } - if areEq(oc, rc.min) { + if oc.Equal(rc.min) { ret := rc.dup() ret.includeMin = true return ret } - if areEq(oc, rc.max) { + if oc.Equal(rc.max) { ret := rc.dup() ret.includeMax = true return ret @@ -233,7 +238,10 @@ func (rc rangeConstraint) Union(c Constraint) Constraint { } // There's at least some dupes, which are all we need to include - nc := rangeConstraint{} + nc := rangeConstraint{ + min: Version{special: zeroVersion}, + max: Version{special: infiniteVersion}, + } for _, e1 := range rc.excl { for _, e2 := range oc.excl { 
if e1.Equal(e2) { @@ -264,7 +272,10 @@ func (rc rangeConstraint) Union(c Constraint) Constraint { } else if rc.MatchesAny(oc) { // Receiver and input overlap; form a new range accordingly. - nc := rangeConstraint{} + nc := rangeConstraint{ + min: Version{special: zeroVersion}, + max: Version{special: infiniteVersion}, + } // For efficiency, we simultaneously determine if either of the // ranges are supersets of the other, while also selecting the min @@ -370,6 +381,14 @@ func (rc rangeConstraint) isSupersetOf(rc2 rangeConstraint) bool { } func (rc rangeConstraint) String() string { + return rc.toString(false) +} + +func (rc rangeConstraint) ImpliedCaretString() string { + return rc.toString(true) +} + +func (rc rangeConstraint) toString(impliedCaret bool) string { var pieces []string // We need to trigger the standard verbose handling from various points, so @@ -393,7 +412,14 @@ func (rc rangeConstraint) String() string { } // Handle the possibility that we might be able to express the range - // with a carat or tilde, as we prefer those forms. + // with a caret or tilde, as we prefer those forms. + var caretstr string + if impliedCaret { + caretstr = "%s" + } else { + caretstr = "^%s" + } + switch { case rc.minIsZero() && rc.maxIsInf(): // This if is internal because it's useful to know for the other cases @@ -404,13 +430,13 @@ func (rc rangeConstraint) String() string { return "*" } case rc.minIsZero(), rc.includeMax, !rc.includeMin: - // tilde and carat could never apply here + // tilde and caret could never apply here noshort() - case !rc.maxIsInf() && rc.max.Minor() == 0 && rc.max.Patch() == 0: // basic carat + case !rc.maxIsInf() && rc.max.Minor() == 0 && rc.max.Patch() == 0: // basic caret if rc.min.Major() == rc.max.Major()-1 && rc.min.Major() != 0 { - pieces = append(pieces, fmt.Sprintf("^%s", rc.min)) + pieces = append(pieces, fmt.Sprintf(caretstr, rc.min)) } else { - // range is too wide for carat, need standard operators + // range is too wide for caret, need standard operators noshort() } case !rc.maxIsInf() && rc.max.Major() != 0 && rc.max.Patch() == 0: // basic tilde @@ -421,10 +447,10 @@ func (rc rangeConstraint) String() string { noshort() } case !rc.maxIsInf() && rc.max.Major() == 0 && rc.max.Patch() == 0 && rc.max.Minor() != 0: - // below 1.0.0, tilde is meaningless but carat is shifted to the + // below 1.0.0, tilde is meaningless but caret is shifted to the // right (so it basically behaves the same as tilde does above 1.0.0) if rc.min.Minor() == rc.max.Minor()-1 { - pieces = append(pieces, fmt.Sprintf("^%s", rc.min)) + pieces = append(pieces, fmt.Sprintf(caretstr, rc.min)) } else { noshort() } @@ -457,7 +483,7 @@ func areAdjacent(c1, c2 Constraint) bool { return false } - if !areEq(rc1.max, rc2.min) { + if !rc1.max.Equal(rc2.min) { return false } @@ -472,10 +498,10 @@ func (rc rangeConstraint) MatchesAny(c Constraint) bool { return true } -func dedupeExcls(ex1, ex2 []*Version) []*Version { +func dedupeExcls(ex1, ex2 []Version) []Version { // TODO stupid inefficient, but these are really only ever going to be // small, so not worth optimizing right now - var ret []*Version + var ret []Version oloop: for _, e1 := range ex1 { for _, e2 := range ex2 { @@ -491,14 +517,3 @@ oloop: func (rangeConstraint) _private() {} func (rangeConstraint) _real() {} - -func areEq(v1, v2 *Version) bool { - if v1 == nil && v2 == nil { - return true - } - - if v1 != nil && v2 != nil { - return v1.Equal(v2) - } - return false -} diff --git a/vendor/github.com/Masterminds/semver/set_ops_test.go 
b/vendor/github.com/Masterminds/semver/set_ops_test.go index 363e8484d1..c08f27618d 100644 --- a/vendor/github.com/Masterminds/semver/set_ops_test.go +++ b/vendor/github.com/Masterminds/semver/set_ops_test.go @@ -14,7 +14,7 @@ func TestIntersection(t *testing.T) { } if actual = Intersection(rc1); !constraintEq(actual, rc1) { - t.Errorf("Intersection of one item should always return that item; got %q") + t.Errorf("Intersection of one item should always return that item; got %q", actual) } if actual = Intersection(rc1, None()); !IsNone(actual) { @@ -83,7 +83,7 @@ func TestRangeIntersection(t *testing.T) { } // now exclude just that version - rc1.excl = []*Version{v1} + rc1.excl = []Version{v1} if actual = rc1.Intersect(v1); !IsNone(actual) { t.Errorf("Intersection of version with range having specific exclude for that version should produce None; got %q", actual) } @@ -133,8 +133,10 @@ func TestRangeIntersection(t *testing.T) { // Overlaps with nils rc1 = rangeConstraint{ min: newV(1, 0, 0), + max: Version{special: infiniteVersion}, } rc2 = rangeConstraint{ + min: Version{special: zeroVersion}, max: newV(2, 2, 0), } result = rangeConstraint{ @@ -257,7 +259,7 @@ func TestRangeIntersection(t *testing.T) { rc1 = rangeConstraint{ min: newV(1, 5, 0), max: newV(2, 0, 0), - excl: []*Version{ + excl: []Version{ newV(1, 6, 0), }, } @@ -281,7 +283,7 @@ func TestRangeIntersection(t *testing.T) { rc2 = rangeConstraint{ min: newV(1, 0, 0), max: newV(3, 0, 0), - excl: []*Version{ + excl: []Version{ newV(1, 1, 0), }, } @@ -296,9 +298,11 @@ func TestRangeIntersection(t *testing.T) { // Test min, and greater min rc1 = rangeConstraint{ min: newV(1, 0, 0), + max: Version{special: infiniteVersion}, } rc2 = rangeConstraint{ min: newV(1, 5, 0), + max: Version{special: infiniteVersion}, includeMin: true, } @@ -329,13 +333,17 @@ func TestRangeIntersection(t *testing.T) { // Ensure pure excludes come through as they should rc1 = rangeConstraint{ - excl: []*Version{ + min: Version{special: zeroVersion}, + max: Version{special: infiniteVersion}, + excl: []Version{ newV(1, 6, 0), }, } rc2 = rangeConstraint{ - excl: []*Version{ + min: Version{special: zeroVersion}, + max: Version{special: infiniteVersion}, + excl: []Version{ newV(1, 6, 0), newV(1, 7, 0), }, @@ -379,7 +387,7 @@ func TestRangeUnion(t *testing.T) { // now exclude just that version rc2 := rc1.dup() - rc2.excl = []*Version{v1} + rc2.excl = []Version{v1} if actual = rc2.Union(v1); !constraintEq(actual, rc1) { t.Errorf("Union of version with range having specific exclude for that version should produce the range without that exclude; got %q", actual) } @@ -454,8 +462,10 @@ func TestRangeUnion(t *testing.T) { // Overlaps with nils rc1 = rangeConstraint{ min: newV(1, 0, 0), + max: Version{special: infiniteVersion}, } rc2 = rangeConstraint{ + min: Version{special: zeroVersion}, max: newV(2, 2, 0), } @@ -469,6 +479,7 @@ func TestRangeUnion(t *testing.T) { // Just one nil in overlap rc1.max = newV(2, 0, 0) result = rangeConstraint{ + min: Version{special: zeroVersion}, max: newV(2, 2, 0), } @@ -479,10 +490,11 @@ func TestRangeUnion(t *testing.T) { t.Errorf("Got constraint %q, but expected %q", actual, result) } - rc1.max = nil + rc1.max = Version{special: infiniteVersion} rc2.min = newV(1, 5, 0) result = rangeConstraint{ min: newV(1, 0, 0), + max: Version{special: infiniteVersion}, } if actual = rc1.Union(rc2); !constraintEq(actual, result) { @@ -582,7 +594,7 @@ func TestRangeUnion(t *testing.T) { rc1 = rangeConstraint{ min: newV(1, 5, 0), max: newV(2, 0, 0), - 
excl: []*Version{ + excl: []Version{ newV(1, 6, 0), }, } @@ -606,7 +618,7 @@ func TestRangeUnion(t *testing.T) { rc2 = rangeConstraint{ min: newV(1, 0, 0), max: newV(3, 0, 0), - excl: []*Version{ + excl: []Version{ newV(1, 1, 0), }, } @@ -620,13 +632,17 @@ func TestRangeUnion(t *testing.T) { // Ensure pure excludes come through as they should rc1 = rangeConstraint{ - excl: []*Version{ + min: Version{special: zeroVersion}, + max: Version{special: infiniteVersion}, + excl: []Version{ newV(1, 6, 0), }, } rc2 = rangeConstraint{ - excl: []*Version{ + min: Version{special: zeroVersion}, + max: Version{special: infiniteVersion}, + excl: []Version{ newV(1, 6, 0), newV(1, 7, 0), }, @@ -640,7 +656,9 @@ func TestRangeUnion(t *testing.T) { } rc1 = rangeConstraint{ - excl: []*Version{ + min: Version{special: zeroVersion}, + max: Version{special: infiniteVersion}, + excl: []Version{ newV(1, 5, 0), }, } @@ -738,7 +756,7 @@ func TestUnionIntersection(t *testing.T) { } // Ensure excludes carry as they should - rc1.excl = []*Version{newV(1, 5, 5)} + rc1.excl = []Version{newV(1, 5, 5)} u1 = unionConstraint{rc1, rc2} ur = unionConstraint{rc1, rc4} diff --git a/vendor/github.com/Masterminds/semver/union.go b/vendor/github.com/Masterminds/semver/union.go index 26598281ee..bc794f88e0 100644 --- a/vendor/github.com/Masterminds/semver/union.go +++ b/vendor/github.com/Masterminds/semver/union.go @@ -4,14 +4,15 @@ import "strings" type unionConstraint []realConstraint -func (uc unionConstraint) Matches(v *Version) error { +func (uc unionConstraint) Matches(v Version) error { var uce MultiMatchFailure for _, c := range uc { - if err := c.Matches(v); err == nil { + err := c.Matches(v) + if err == nil { return nil - } else { - uce = append(uce, err.(MatchFailure)) } + uce = append(uce, err.(MatchFailure)) + } return uce @@ -25,7 +26,7 @@ func (uc unionConstraint) Intersect(c2 Constraint) Constraint { return None() case any: return uc - case *Version: + case Version: return c2 case rangeConstraint: other = append(other, tc2) @@ -70,6 +71,16 @@ func (uc unionConstraint) String() string { return strings.Join(pieces, " || ") } + +func (uc unionConstraint) ImpliedCaretString() string { + var pieces []string + for _, c := range uc { + pieces = append(pieces, c.ImpliedCaretString()) + } + + return strings.Join(pieces, " || ") +} + func (unionConstraint) _private() {} type constraintList []realConstraint @@ -86,12 +97,12 @@ func (cl constraintList) Less(i, j int) bool { ic, jc := cl[i], cl[j] switch tic := ic.(type) { - case *Version: + case Version: switch tjc := jc.(type) { - case *Version: + case Version: return tic.LessThan(tjc) case rangeConstraint: - if tjc.min == nil { + if tjc.minIsZero() { return false } @@ -104,8 +115,8 @@ func (cl constraintList) Less(i, j int) bool { } case rangeConstraint: switch tjc := jc.(type) { - case *Version: - if tic.min == nil { + case Version: + if tic.minIsZero() { return true } @@ -116,10 +127,10 @@ func (cl constraintList) Less(i, j int) bool { } return tic.min.LessThan(tjc) case rangeConstraint: - if tic.min == nil { + if tic.minIsZero() { return true } - if tjc.min == nil { + if tjc.minIsZero() { return false } return tic.min.LessThan(tjc.min) diff --git a/vendor/github.com/Masterminds/semver/version.go b/vendor/github.com/Masterminds/semver/version.go index f984825f8b..449b754040 100644 --- a/vendor/github.com/Masterminds/semver/version.go +++ b/vendor/github.com/Masterminds/semver/version.go @@ -29,13 +29,14 @@ func (b badVersionSegment) Error() string { return fmt.Sprintf("Error 
parsing version segment: %s", b.e) } -// Controls whether or not parsed constraints are cached +// CacheVersions controls whether or not parsed versions are cached. Defaults +// to true. var CacheVersions = true var versionCache = make(map[string]vcache) var versionCacheLock sync.RWMutex type vcache struct { - v *Version + v Version err error } @@ -44,12 +45,21 @@ const SemVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +type specialVersion uint8 + +const ( + notSpecial specialVersion = iota + zeroVersion + infiniteVersion +) + // Version represents a single semantic version. type Version struct { major, minor, patch uint64 pre string metadata string original string + special specialVersion } func init() { @@ -58,7 +68,7 @@ func init() { // NewVersion parses a given version and returns an instance of Version or // an error if unable to parse the version. -func NewVersion(v string) (*Version, error) { +func NewVersion(v string) (Version, error) { if CacheVersions { versionCacheLock.RLock() if sv, exists := versionCache[v]; exists { @@ -75,10 +85,10 @@ versionCache[v] = vcache{err: ErrInvalidSemVer} versionCacheLock.Unlock() } - return nil, ErrInvalidSemVer + return Version{}, ErrInvalidSemVer } - sv := &Version{ + sv := Version{ metadata: m[8], pre: m[5], original: v, @@ -94,7 +104,7 @@ versionCacheLock.Unlock() } - return nil, bvs + return Version{}, bvs } sv.major = temp @@ -108,7 +118,7 @@ versionCacheLock.Unlock() } - return nil, bvs + return Version{}, bvs } sv.minor = temp } else { @@ -125,7 +135,7 @@ versionCacheLock.Unlock() } - return nil, bvs + return Version{}, bvs } sv.patch = temp } else { @@ -146,10 +156,28 @@ // See the Original() method to retrieve the original value. Semantic Versions // don't contain a leading v per the spec. Instead it's optional on // impelementation. -func (v *Version) String() string { +func (v Version) String() string { + return v.toString(false) +} + +// ImpliedCaretString follows the same rules as String(), but in accordance with +// the Constraint interface will always print a leading "=", as all Versions, +// when acting as a Constraint, act as exact matches. +func (v Version) ImpliedCaretString() string { + return v.toString(true) +} + +func (v Version) toString(ic bool) string { var buf bytes.Buffer - fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch) + var base string + if ic { + base = "=%d.%d.%d" + } else { + base = "%d.%d.%d" + } + + fmt.Fprintf(&buf, base, v.major, v.minor, v.patch) if v.pre != "" { fmt.Fprintf(&buf, "-%s", v.pre) } @@ -161,7 +189,7 @@ } // Original returns the original value passed in to be parsed. -func (v *Version) Original() string { +func (v Version) Original() string { return v.original } @@ -181,44 +209,29 @@ func (v *Version) Patch() uint64 { } // Prerelease returns the pre-release version. -func (v *Version) Prerelease() string { +func (v Version) Prerelease() string { return v.pre } // Metadata returns the metadata on the version. -func (v *Version) Metadata() string { +func (v Version) Metadata() string { return v.metadata } // LessThan tests if one version is less than another one.
-func (v *Version) LessThan(o *Version) bool { - // If a nil version was passed, fail and bail out early. - if o == nil { - return false - } - +func (v Version) LessThan(o Version) bool { return v.Compare(o) < 0 } // GreaterThan tests if one version is greater than another one. -func (v *Version) GreaterThan(o *Version) bool { - // If a nil version was passed, fail and bail out early. - if o == nil { - return false - } - +func (v Version) GreaterThan(o Version) bool { return v.Compare(o) > 0 } // Equal tests if two versions are equal to each other. // Note, versions can be equal with different metadata since metadata // is not considered part of the comparable version. -func (v *Version) Equal(o *Version) bool { - // If a nil version was passed, fail and bail out early. - if o == nil { - return false - } - +func (v Version) Equal(o Version) bool { return v.Compare(o) == 0 } @@ -227,7 +240,27 @@ func (v *Version) Equal(o *Version) bool { // // Versions are compared by X.Y.Z. Build metadata is ignored. Prerelease is // lower than the version without a prerelease. -func (v *Version) Compare(o *Version) int { +func (v Version) Compare(o Version) int { + // The special field supersedes all the other information. If it's not + // equal, we can skip out early + if v.special != o.special { + switch v.special { + case zeroVersion: + return -1 + case notSpecial: + if o.special == zeroVersion { + return 1 + } + return -1 + case infiniteVersion: + return 1 + } + } else if v.special != notSpecial { + // If special fields are equal and not notSpecial, then they're + // necessarily equal + return 0 + } + // Compare the major, minor, and patch version for differences. If a // difference is found return the comparison. if d := compareSegment(v.Major(), o.Major()); d != 0 { @@ -257,7 +290,10 @@ return comparePrerelease(ps, po) } -func (v *Version) Matches(v2 *Version) error { +// Matches checks whether two versions match. If they do not, +// an error is returned indicating the problem; if they do, the error is nil. +// This is part of the Constraint interface. +func (v Version) Matches(v2 Version) error { if v.Equal(v2) { return nil } @@ -265,18 +301,23 @@ return VersionMatchFailure{v: v, other: v2} } -func (v *Version) MatchesAny(c Constraint) bool { - if v2, ok := c.(*Version); ok { +// MatchesAny checks whether a version matches a constraint, which can be +// anything implementing the Constraint interface. +func (v Version) MatchesAny(c Constraint) bool { + if v2, ok := c.(Version); ok { return v.Equal(v2) - } else { - // The other implementations all have specific handling for this; fall - // back on theirs. - return c.MatchesAny(v) } + + // The other implementations all have specific handling for this; fall + // back on theirs. + return c.MatchesAny(v) } -func (v *Version) Intersect(c Constraint) Constraint { - if v2, ok := c.(*Version); ok { +// Intersect computes the intersection between the receiving Constraint and +// the passed Constraint, and returns a new Constraint representing the result. +// This is part of the Constraint interface.
+func (v Version) Intersect(c Constraint) Constraint { + if v2, ok := c.(Version); ok { if v.Equal(v2) { return v } @@ -286,12 +327,15 @@ func (v *Version) Intersect(c Constraint) Constraint { return c.Intersect(v) } -func (v *Version) Union(c Constraint) Constraint { - if v2, ok := c.(*Version); ok && v.Equal(v2) { +// Union computes the union between the receiving Constraint and the passed +// Constraint, and returns a new Constraint representing the result. +// This is part of the Constraint interface. +func (v Version) Union(c Constraint) Constraint { + if v2, ok := c.(Version); ok && v.Equal(v2) { return v - } else { - return Union(v, c) } + + return Union(v, c) } func (Version) _private() {} @@ -327,7 +371,7 @@ func comparePrerelease(v, o string) int { // Iterate over each part of the prereleases to compare the differences. for i := 0; i < l; i++ { - // Since the lentgh of the parts can be different we need to create + // Since the length of the parts can be different we need to create // a placeholder. This is to avoid out of bounds issues. stemp := "" if i < slen { @@ -360,14 +404,14 @@ func comparePrePart(s, o string) int { // When s or o are empty we can use the other in an attempt to determine // the response. if o == "" { - _, n := strconv.ParseInt(s, 10, 64) + _, n := strconv.ParseUint(s, 10, 64) if n != nil { return -1 } return 1 } if s == "" { - _, n := strconv.ParseInt(o, 10, 64) + _, n := strconv.ParseUint(o, 10, 64) if n != nil { return 1 } @@ -379,3 +423,25 @@ func comparePrePart(s, o string) int { } return -1 } + +func numPartsEq(v1, v2 Version) bool { + if v1.special != v2.special { + return false + } + if v1.special != notSpecial { + // If special fields are equal and not notSpecial, then the versions are + // necessarily equal, so their numeric parts are too. + return true + } + + if v1.major != v2.major { + return false + } + if v1.minor != v2.minor { + return false + } + if v1.patch != v2.patch { + return false + } + return true +} diff --git a/vendor/github.com/Masterminds/semver/version_test.go b/vendor/github.com/Masterminds/semver/version_test.go index e8ad413a79..1fae87f526 100644 --- a/vendor/github.com/Masterminds/semver/version_test.go +++ b/vendor/github.com/Masterminds/semver/version_test.go @@ -180,6 +180,33 @@ func TestCompare(t *testing.T) { ) } } + + // One-off tests for special version comparisons + zero := Version{special: zeroVersion} + inf := Version{special: infiniteVersion} + + if zero.Compare(inf) != -1 { + t.Error("Zero version should always be less than infinite version") + } + if zero.Compare(zero) != 0 { + t.Error("Zero version should equal itself") + } + if inf.Compare(zero) != 1 { + t.Error("Infinite version should always be greater than zero version") + } + if inf.Compare(inf) != 0 { + t.Error("Infinite version should equal itself") + } + + // Need to work vs. a normal version, too. + v := Version{} + + if zero.Compare(v) != -1 { + t.Error("Zero version should always be less than any normal version") + } + if inf.Compare(v) != 1 { + t.Error("Infinite version should always be greater than any normal version") + } } func TestLessThan(t *testing.T) { diff --git a/vendor/github.com/Masterminds/vcs/git.go b/vendor/github.com/Masterminds/vcs/git.go index 6467136791..4094e0d03c 100644 --- a/vendor/github.com/Masterminds/vcs/git.go +++ b/vendor/github.com/Masterminds/vcs/git.go @@ -366,7 +366,7 @@ func (s *GitRepo) Ping() bool { // EscapePathSeparator escapes the path separator by replacing it with several. 
// Note: this is harmless on Unix, and needed on Windows. func EscapePathSeparator(path string) string { switch runtime.GOOS { case `windows`: // On Windows, triple all path separators. @@ -379,7 +379,7 @@ // used with --prefix, like this: --prefix=C:\foo\bar\ -> --prefix=C:\\\foo\\\bar\\\ return strings.Replace(path, string(os.PathSeparator), - string(os.PathSeparator)+string(os.PathSeparator)+string(os.PathSeparator), + string(os.PathSeparator) + string(os.PathSeparator) + string(os.PathSeparator), -1) default: return path @@ -404,7 +404,7 @@ func (s *GitRepo) ExportDir(dir string) error { return NewLocalError("Unable to create directory", err, "") } path = EscapePathSeparator(dir) out, err := s.RunFromDir("git", "checkout-index", "-f", "-a", "--prefix="+path) s.log(out) if err != nil { @@ -412,7 +412,7 @@ } // and now, the horror of submodules path = EscapePathSeparator(dir + "$path" + string(os.PathSeparator)) out, err = s.RunFromDir("git", "submodule", "foreach", "--recursive", "git checkout-index -f -a --prefix="+path) s.log(out) if err != nil { diff --git a/vendor/github.com/Masterminds/vcs/git_test.go b/vendor/github.com/Masterminds/vcs/git_test.go index b7b9a24aae..b58c2c2efd 100644 --- a/vendor/github.com/Masterminds/vcs/git_test.go +++ b/vendor/github.com/Masterminds/vcs/git_test.go @@ -559,6 +559,7 @@ func TestGitSubmoduleHandling2(t *testing.T) { t.Errorf("Current failed to detect Git on tip of master. Got version: %s", v) } + tempDir2, err := ioutil.TempDir("", "go-vcs-git-tests-export") if err != nil { t.Fatalf("Error creating temp directory: %s", err) } @@ -582,7 +583,7 @@ func TestGitSubmoduleHandling2(t *testing.T) { t.Errorf("Error checking exported file in Git: %s", err) } _, err = os.Stat(filepath.Join(filepath.Join(exportDir, "definitions"), "README.md")) if err != nil { t.Errorf("Error checking exported file in Git: %s", err) } diff --git a/vendor/github.com/go-yaml/yaml/.travis.yml b/vendor/github.com/go-yaml/yaml/.travis.yml new file mode 100644 index 0000000000..004172a2e3 --- /dev/null +++ b/vendor/github.com/go-yaml/yaml/.travis.yml @@ -0,0 +1,9 @@ +language: go + +go: + - 1.4 + - 1.5 + - 1.6 + - tip + +go_import_path: gopkg.in/yaml.v2 diff --git a/vendor/github.com/go-yaml/yaml/LICENSE b/vendor/github.com/go-yaml/yaml/LICENSE new file mode 100644 index 0000000000..866d74a7ad --- /dev/null +++ b/vendor/github.com/go-yaml/yaml/LICENSE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
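Before the remaining go-yaml files begin, a quick recap of the Masterminds/semver changes vendored above: `Version` is now a value type rather than `*Version`, open range bounds are explicit `zeroVersion`/`infiniteVersion` values instead of nil pointers, every `Constraint` gained an `ImpliedCaretString()` method, and the new `NewConstraintIC` parses with caret-by-default semantics (a bare version is read as if it carried a `^`). The sketch below is illustrative only; it is inferred from the test tables above and assumes the fork is importable under its upstream path `github.com/Masterminds/semver`.

```Go
package main

import (
	"fmt"

	"github.com/Masterminds/semver"
)

func main() {
	// NewVersion now returns a Version value, not a *Version.
	v, err := semver.NewVersion("1.4.2")
	if err != nil {
		panic(err)
	}

	// NewConstraintIC applies caret-by-default: a bare "1.1.0" is parsed
	// as ^1.1.0, i.e. the range >=1.1.0, <2.0.0.
	c, err := semver.NewConstraintIC("1.1.0")
	if err != nil {
		panic(err)
	}
	fmt.Println(c.Matches(v) == nil)    // true: 1.4.2 falls within [1.1.0, 2.0.0)
	fmt.Println(c.ImpliedCaretString()) // "1.1.0": the implied caret is not printed

	// Prerelease versions now only match when their numeric parts equal
	// the range minimum's (the rerrPre case added in range.go).
	pre, _ := semver.NewVersion("2.1.0-alpha2")
	cp, _ := semver.NewConstraint("^2.1.0-alpha1")
	fmt.Println(cp.Matches(pre) == nil) // true: same 2.1.0 numeric parts as the min
}
```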
diff --git a/vendor/github.com/go-yaml/yaml/LICENSE.libyaml b/vendor/github.com/go-yaml/yaml/LICENSE.libyaml new file mode 100644 index 0000000000..8da58fbf6f --- /dev/null +++ b/vendor/github.com/go-yaml/yaml/LICENSE.libyaml @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/go-yaml/yaml/README.md b/vendor/github.com/go-yaml/yaml/README.md new file mode 100644 index 0000000000..1884de6a7d --- /dev/null +++ b/vendor/github.com/go-yaml/yaml/README.md @@ -0,0 +1,131 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v2*. + +To install it, run: + + go get gopkg.in/yaml.v2 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v2" +) + +var data = ` +a: Easy! 
+b: + c: 2 + d: [3, 4] +` + +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/github.com/go-yaml/yaml/apic.go b/vendor/github.com/go-yaml/yaml/apic.go new file mode 100644 index 0000000000..95ec014e8c --- /dev/null +++ b/vendor/github.com/go-yaml/yaml/apic.go @@ -0,0 +1,742 @@ +package yaml + +import ( + "io" + "os" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// File read handler. +func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_file.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_file_read_handler + parser.input_file = file +} + +// Set the source encoding. 
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) bool { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } + return true +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// File write handler. +func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_file.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_file_write_handler + emitter.output_file = file +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. 
+// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } + return true +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } + return true +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, implicit bool) bool { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } + return true +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } + return true +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. 
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } + return true +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compliler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. 
+// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/github.com/go-yaml/yaml/decode.go b/vendor/github.com/go-yaml/yaml/decode.go new file mode 100644 index 0000000000..052ecfcd19 --- /dev/null +++ b/vendor/github.com/go-yaml/yaml/decode.go @@ -0,0 +1,682 @@ +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "strconv" + "time" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + line, column int + tag string + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
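For orientation, a document like `a: [1, 2]` comes out of this parser as a documentNode whose single child is a mappingNode; a mapping's children alternate key and value nodes, so here they are the scalar key `a` and a sequenceNode holding two scalar items. A minimal sketch of walking such a tree, assuming code inside this package (`countScalars` is a hypothetical helper, not part of the vendored file):

```go
// countScalars walks a decoded node tree and counts its scalar leaves,
// relying only on the node struct defined above.
func countScalars(n *node) int {
	if n == nil {
		return 0
	}
	if n.kind == scalarNode {
		return 1
	}
	total := 0
	for _, child := range n.children {
		total += countScalars(child)
	}
	return total
}
```

For `a: [1, 2]` this returns 3: the key scalar plus the two sequence items. The parser type below is what produces these trees.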
+ +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *node +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + + if len(b) == 0 { + b = []byte{'\n'} + } + + yaml_parser_set_input_string(&p.parser, b) + + p.skip() + if p.event.typ != yaml_STREAM_START_EVENT { + panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ))) + } + p.skip() + return &p +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +func (p *parser) skip() { + if p.event.typ != yaml_NO_EVENT { + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + yaml_event_delete(&p.event) + } + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + } else if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *node, anchor []byte) { + if anchor != nil { + p.doc.anchors[string(anchor)] = n + } +} + +func (p *parser) parse() *node { + switch p.event.typ { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. + return nil + default: + panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ))) + } +} + +func (p *parser) node(kind int) *node { + return &node{ + kind: kind, + line: p.event.start_mark.line, + column: p.event.start_mark.column, + } +} + +func (p *parser) document() *node { + n := p.node(documentNode) + n.anchors = make(map[string]*node) + p.doc = n + p.skip() + n.children = append(n.children, p.parse()) + if p.event.typ != yaml_DOCUMENT_END_EVENT { + panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ))) + } + p.skip() + return n +} + +func (p *parser) alias() *node { + n := p.node(aliasNode) + n.value = string(p.event.anchor) + p.skip() + return n +} + +func (p *parser) scalar() *node { + n := p.node(scalarNode) + n.value = string(p.event.value) + n.tag = string(p.event.tag) + n.implicit = p.event.implicit + p.anchor(n, p.event.anchor) + p.skip() + return n +} + +func (p *parser) sequence() *node { + n := p.node(sequenceNode) + p.anchor(n, p.event.anchor) + p.skip() + for p.event.typ != yaml_SEQUENCE_END_EVENT { + n.children = append(n.children, p.parse()) + } + p.skip() + return n +} + +func (p *parser) mapping() *node { + n := p.node(mappingNode) + p.anchor(n, p.event.anchor) + p.skip() + for p.event.typ != yaml_MAPPING_END_EVENT { + n.children = append(n.children, p.parse(), p.parse()) + } + p.skip() + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. 
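Tying the parser methods above together: a caller obtains the tree by constructing a parser and requesting one document. A hedged package-internal sketch (`parseTree` is a hypothetical helper, not vendored API):

```go
// parseTree decodes a single document from b into a node tree. newParser has
// already consumed the stream-start event, so the next parse() call sees the
// document-start event; for an empty stream, parse() returns nil.
func parseTree(b []byte) *node {
	p := newParser(b)
	defer p.destroy()
	return p.parse()
}
```

The decoder defined next consumes exactly this output, dispatching on node.kind to fill in the caller's Go value.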
+ +type decoder struct { + doc *node + aliases map[string]bool + mapType reflect.Type + terrors []string +} + +var ( + mapItemType = reflect.TypeOf(MapItem{}) + durationType = reflect.TypeOf(time.Duration(0)) + defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = defaultMapType.Elem() +) + +func newDecoder() *decoder { + d := &decoder{mapType: defaultMapType} + d.aliases = make(map[string]bool) + return d +} + +func (d *decoder) terror(n *node, tag string, out reflect.Value) { + if n.tag != "" { + tag = n.tag + } + value := n.value + if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. +func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "" && n.implicit) { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + if u, ok := out.Addr().Interface().(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + switch n.kind { + case documentNode: + return d.document(n, out) + case aliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.kind { + case scalarNode: + good = d.scalar(n, out) + case mappingNode: + good = d.mapping(n, out) + case sequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + return good +} + +func (d *decoder) document(n *node, out reflect.Value) (good bool) { + if len(n.children) == 1 { + d.doc = n + d.unmarshal(n.children[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *node, out reflect.Value) (good bool) { + an, ok := d.doc.anchors[n.value] + if !ok { + failf("unknown anchor '%s' referenced", n.value) + } + if d.aliases[n.value] { + failf("anchor '%s' value contains itself", n.value) + } + d.aliases[n.value] = true + good = d.unmarshal(an, out) + delete(d.aliases, n.value) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + 
out.SetMapIndex(k, zeroValue)
+ }
+}
+
+func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
+ var tag string
+ var resolved interface{}
+ if n.tag == "" && !n.implicit {
+ tag = yaml_STR_TAG
+ resolved = n.value
+ } else {
+ tag, resolved = resolve(n.tag, n.value)
+ if tag == yaml_BINARY_TAG {
+ data, err := base64.StdEncoding.DecodeString(resolved.(string))
+ if err != nil {
+ failf("!!binary value contains invalid base64 data")
+ }
+ resolved = string(data)
+ }
+ }
+ if resolved == nil {
+ if out.Kind() == reflect.Map && !out.CanAddr() {
+ resetMap(out)
+ } else {
+ out.Set(reflect.Zero(out.Type()))
+ }
+ return true
+ }
+ if s, ok := resolved.(string); ok && out.CanAddr() {
+ if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok {
+ err := u.UnmarshalText([]byte(s))
+ if err != nil {
+ fail(err)
+ }
+ return true
+ }
+ }
+ switch out.Kind() {
+ case reflect.String:
+ if tag == yaml_BINARY_TAG {
+ out.SetString(resolved.(string))
+ good = true
+ } else if resolved != nil {
+ out.SetString(n.value)
+ good = true
+ }
+ case reflect.Interface:
+ if resolved == nil {
+ out.Set(reflect.Zero(out.Type()))
+ } else {
+ out.Set(reflect.ValueOf(resolved))
+ }
+ good = true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ switch resolved := resolved.(type) {
+ case int:
+ if !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ good = true
+ }
+ case int64:
+ if !out.OverflowInt(resolved) {
+ out.SetInt(resolved)
+ good = true
+ }
+ case uint64:
+ if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ good = true
+ }
+ case float64:
+ if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ good = true
+ }
+ case string:
+ if out.Type() == durationType {
+ d, err := time.ParseDuration(resolved)
+ if err == nil {
+ out.SetInt(int64(d))
+ good = true
+ }
+ }
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ switch resolved := resolved.(type) {
+ case int:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ good = true
+ }
+ case int64:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ good = true
+ }
+ case uint64:
+ if !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ good = true
+ }
+ case float64:
+ if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ good = true
+ }
+ }
+ case reflect.Bool:
+ switch resolved := resolved.(type) {
+ case bool:
+ out.SetBool(resolved)
+ good = true
+ }
+ case reflect.Float32, reflect.Float64:
+ switch resolved := resolved.(type) {
+ case int:
+ out.SetFloat(float64(resolved))
+ good = true
+ case int64:
+ out.SetFloat(float64(resolved))
+ good = true
+ case uint64:
+ out.SetFloat(float64(resolved))
+ good = true
+ case float64:
+ out.SetFloat(resolved)
+ good = true
+ }
+ case reflect.Ptr:
+ if out.Type().Elem() == reflect.TypeOf(resolved) {
+ // TODO Does this make sense? When is out a Ptr except when decoding a nil value?
+ elem := reflect.New(out.Type().Elem()) + elem.Elem().Set(reflect.ValueOf(resolved)) + out.Set(elem) + good = true + } + } + if !good { + d.terror(n, tag, out) + } + return good +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { + l := len(n.children) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Interface: + // No type hints. Will have to use a generic sequence. + iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, yaml_SEQ_TAG, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.children[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + out.Set(out.Slice(0, j)) + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Slice: + return d.mappingSlice(n, out) + case reflect.Map: + // okay + case reflect.Interface: + if d.mapType.Kind() == reflect.Map { + iface := out + out = reflect.MakeMap(d.mapType) + iface.Set(out) + } else { + slicev := reflect.New(d.mapType).Elem() + if !d.mappingSlice(n, slicev) { + return false + } + out.Set(slicev) + return true + } + default: + d.terror(n, yaml_MAP_TAG, out) + return false + } + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + mapType := d.mapType + if outt.Key() == ifaceType && outt.Elem() == ifaceType { + d.mapType = outt + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + l := len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.children[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.children[i+1], e) { + out.SetMapIndex(k, e) + } + } + } + d.mapType = mapType + return true +} + +func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { + outt := out.Type() + if outt.Elem() != mapItemType { + d.terror(n, yaml_MAP_TAG, out) + return false + } + + mapType := d.mapType + d.mapType = outt + + var slice []MapItem + var l = len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + item := MapItem{} + k := reflect.ValueOf(&item.Key).Elem() + if d.unmarshal(n.children[i], k) { + v := reflect.ValueOf(&item.Value).Elem() + if d.unmarshal(n.children[i+1], v) { + slice = append(slice, item) + } + } + } + out.Set(reflect.ValueOf(slice)) + d.mapType = mapType + return true +} + +func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + name := settableValueOf("") + l := len(n.children) + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + for i := 0; i < l; i += 2 { + ni := n.children[i] + if isMerge(ni) { + d.merge(n.children[i+1], out) 
+ continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = out.FieldByIndex(info.Inline) + } + d.unmarshal(n.children[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.children[i+1], value) + inlineMap.SetMapIndex(name, value) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *node, out reflect.Value) { + switch n.kind { + case mappingNode: + d.unmarshal(n, out) + case aliasNode: + an, ok := d.doc.anchors[n.value] + if ok && an.kind != mappingNode { + failWantMap() + } + d.unmarshal(n, out) + case sequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.children) - 1; i >= 0; i-- { + ni := n.children[i] + if ni.kind == aliasNode { + an, ok := d.doc.anchors[ni.value] + if ok && an.kind != mappingNode { + failWantMap() + } + } else if ni.kind != mappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *node) bool { + return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +} diff --git a/vendor/github.com/go-yaml/yaml/decode_test.go b/vendor/github.com/go-yaml/yaml/decode_test.go new file mode 100644 index 0000000000..a6fea0f20a --- /dev/null +++ b/vendor/github.com/go-yaml/yaml/decode_test.go @@ -0,0 +1,998 @@ +package yaml_test + +import ( + "errors" + . "gopkg.in/check.v1" + "gopkg.in/yaml.v2" + "math" + "net" + "reflect" + "strings" + "time" +) + +var unmarshalIntTest = 123 + +var unmarshalTests = []struct { + data string + value interface{} +}{ + { + "", + &struct{}{}, + }, { + "{}", &struct{}{}, + }, { + "v: hi", + map[string]string{"v": "hi"}, + }, { + "v: hi", map[string]interface{}{"v": "hi"}, + }, { + "v: true", + map[string]string{"v": "true"}, + }, { + "v: true", + map[string]interface{}{"v": true}, + }, { + "v: 10", + map[string]interface{}{"v": 10}, + }, { + "v: 0b10", + map[string]interface{}{"v": 2}, + }, { + "v: 0xA", + map[string]interface{}{"v": 10}, + }, { + "v: 4294967296", + map[string]int64{"v": 4294967296}, + }, { + "v: 0.1", + map[string]interface{}{"v": 0.1}, + }, { + "v: .1", + map[string]interface{}{"v": 0.1}, + }, { + "v: .Inf", + map[string]interface{}{"v": math.Inf(+1)}, + }, { + "v: -.Inf", + map[string]interface{}{"v": math.Inf(-1)}, + }, { + "v: -10", + map[string]interface{}{"v": -10}, + }, { + "v: -.1", + map[string]interface{}{"v": -0.1}, + }, + + // Simple values. + { + "123", + &unmarshalIntTest, + }, + + // Floats from spec + { + "canonical: 6.8523e+5", + map[string]interface{}{"canonical": 6.8523e+5}, + }, { + "expo: 685.230_15e+03", + map[string]interface{}{"expo": 685.23015e+03}, + }, { + "fixed: 685_230.15", + map[string]interface{}{"fixed": 685230.15}, + }, { + "neginf: -.inf", + map[string]interface{}{"neginf": math.Inf(-1)}, + }, { + "fixed: 685_230.15", + map[string]float64{"fixed": 685230.15}, + }, + //{"sexa: 190:20:30.15", map[string]interface{}{"sexa": 0}}, // Unsupported + //{"notanum: .NaN", map[string]interface{}{"notanum": math.NaN()}}, // Equality of NaN fails. 
+ + // Bools from spec + { + "canonical: y", + map[string]interface{}{"canonical": true}, + }, { + "answer: NO", + map[string]interface{}{"answer": false}, + }, { + "logical: True", + map[string]interface{}{"logical": true}, + }, { + "option: on", + map[string]interface{}{"option": true}, + }, { + "option: on", + map[string]bool{"option": true}, + }, + // Ints from spec + { + "canonical: 685230", + map[string]interface{}{"canonical": 685230}, + }, { + "decimal: +685_230", + map[string]interface{}{"decimal": 685230}, + }, { + "octal: 02472256", + map[string]interface{}{"octal": 685230}, + }, { + "hexa: 0x_0A_74_AE", + map[string]interface{}{"hexa": 685230}, + }, { + "bin: 0b1010_0111_0100_1010_1110", + map[string]interface{}{"bin": 685230}, + }, { + "bin: -0b101010", + map[string]interface{}{"bin": -42}, + }, { + "decimal: +685_230", + map[string]int{"decimal": 685230}, + }, + + //{"sexa: 190:20:30", map[string]interface{}{"sexa": 0}}, // Unsupported + + // Nulls from spec + { + "empty:", + map[string]interface{}{"empty": nil}, + }, { + "canonical: ~", + map[string]interface{}{"canonical": nil}, + }, { + "english: null", + map[string]interface{}{"english": nil}, + }, { + "~: null key", + map[interface{}]string{nil: "null key"}, + }, { + "empty:", + map[string]*bool{"empty": nil}, + }, + + // Flow sequence + { + "seq: [A,B]", + map[string]interface{}{"seq": []interface{}{"A", "B"}}, + }, { + "seq: [A,B,C,]", + map[string][]string{"seq": []string{"A", "B", "C"}}, + }, { + "seq: [A,1,C]", + map[string][]string{"seq": []string{"A", "1", "C"}}, + }, { + "seq: [A,1,C]", + map[string][]int{"seq": []int{1}}, + }, { + "seq: [A,1,C]", + map[string]interface{}{"seq": []interface{}{"A", 1, "C"}}, + }, + // Block sequence + { + "seq:\n - A\n - B", + map[string]interface{}{"seq": []interface{}{"A", "B"}}, + }, { + "seq:\n - A\n - B\n - C", + map[string][]string{"seq": []string{"A", "B", "C"}}, + }, { + "seq:\n - A\n - 1\n - C", + map[string][]string{"seq": []string{"A", "1", "C"}}, + }, { + "seq:\n - A\n - 1\n - C", + map[string][]int{"seq": []int{1}}, + }, { + "seq:\n - A\n - 1\n - C", + map[string]interface{}{"seq": []interface{}{"A", 1, "C"}}, + }, + + // Literal block scalar + { + "scalar: | # Comment\n\n literal\n\n \ttext\n\n", + map[string]string{"scalar": "\nliteral\n\n\ttext\n"}, + }, + + // Folded block scalar + { + "scalar: > # Comment\n\n folded\n line\n \n next\n line\n * one\n * two\n\n last\n line\n\n", + map[string]string{"scalar": "\nfolded line\nnext line\n * one\n * two\n\nlast line\n"}, + }, + + // Map inside interface with no type hints. + { + "a: {b: c}", + map[interface{}]interface{}{"a": map[interface{}]interface{}{"b": "c"}}, + }, + + // Structs and type conversions. 
+ { + "hello: world", + &struct{ Hello string }{"world"}, + }, { + "a: {b: c}", + &struct{ A struct{ B string } }{struct{ B string }{"c"}}, + }, { + "a: {b: c}", + &struct{ A *struct{ B string } }{&struct{ B string }{"c"}}, + }, { + "a: {b: c}", + &struct{ A map[string]string }{map[string]string{"b": "c"}}, + }, { + "a: {b: c}", + &struct{ A *map[string]string }{&map[string]string{"b": "c"}}, + }, { + "a:", + &struct{ A map[string]string }{}, + }, { + "a: 1", + &struct{ A int }{1}, + }, { + "a: 1", + &struct{ A float64 }{1}, + }, { + "a: 1.0", + &struct{ A int }{1}, + }, { + "a: 1.0", + &struct{ A uint }{1}, + }, { + "a: [1, 2]", + &struct{ A []int }{[]int{1, 2}}, + }, { + "a: 1", + &struct{ B int }{0}, + }, { + "a: 1", + &struct { + B int "a" + }{1}, + }, { + "a: y", + &struct{ A bool }{true}, + }, + + // Some cross type conversions + { + "v: 42", + map[string]uint{"v": 42}, + }, { + "v: -42", + map[string]uint{}, + }, { + "v: 4294967296", + map[string]uint64{"v": 4294967296}, + }, { + "v: -4294967296", + map[string]uint64{}, + }, + + // int + { + "int_max: 2147483647", + map[string]int{"int_max": math.MaxInt32}, + }, + { + "int_min: -2147483648", + map[string]int{"int_min": math.MinInt32}, + }, + { + "int_overflow: 9223372036854775808", // math.MaxInt64 + 1 + map[string]int{}, + }, + + // int64 + { + "int64_max: 9223372036854775807", + map[string]int64{"int64_max": math.MaxInt64}, + }, + { + "int64_max_base2: 0b111111111111111111111111111111111111111111111111111111111111111", + map[string]int64{"int64_max_base2": math.MaxInt64}, + }, + { + "int64_min: -9223372036854775808", + map[string]int64{"int64_min": math.MinInt64}, + }, + { + "int64_neg_base2: -0b111111111111111111111111111111111111111111111111111111111111111", + map[string]int64{"int64_neg_base2": -math.MaxInt64}, + }, + { + "int64_overflow: 9223372036854775808", // math.MaxInt64 + 1 + map[string]int64{}, + }, + + // uint + { + "uint_min: 0", + map[string]uint{"uint_min": 0}, + }, + { + "uint_max: 4294967295", + map[string]uint{"uint_max": math.MaxUint32}, + }, + { + "uint_underflow: -1", + map[string]uint{}, + }, + + // uint64 + { + "uint64_min: 0", + map[string]uint{"uint64_min": 0}, + }, + { + "uint64_max: 18446744073709551615", + map[string]uint64{"uint64_max": math.MaxUint64}, + }, + { + "uint64_max_base2: 0b1111111111111111111111111111111111111111111111111111111111111111", + map[string]uint64{"uint64_max_base2": math.MaxUint64}, + }, + { + "uint64_maxint64: 9223372036854775807", + map[string]uint64{"uint64_maxint64": math.MaxInt64}, + }, + { + "uint64_underflow: -1", + map[string]uint64{}, + }, + + // float32 + { + "float32_max: 3.40282346638528859811704183484516925440e+38", + map[string]float32{"float32_max": math.MaxFloat32}, + }, + { + "float32_nonzero: 1.401298464324817070923729583289916131280e-45", + map[string]float32{"float32_nonzero": math.SmallestNonzeroFloat32}, + }, + { + "float32_maxuint64: 18446744073709551615", + map[string]float32{"float32_maxuint64": float32(math.MaxUint64)}, + }, + { + "float32_maxuint64+1: 18446744073709551616", + map[string]float32{"float32_maxuint64+1": float32(math.MaxUint64 + 1)}, + }, + + // float64 + { + "float64_max: 1.797693134862315708145274237317043567981e+308", + map[string]float64{"float64_max": math.MaxFloat64}, + }, + { + "float64_nonzero: 4.940656458412465441765687928682213723651e-324", + map[string]float64{"float64_nonzero": math.SmallestNonzeroFloat64}, + }, + { + "float64_maxuint64: 18446744073709551615", + map[string]float64{"float64_maxuint64": float64(math.MaxUint64)}, 
+ }, + { + "float64_maxuint64+1: 18446744073709551616", + map[string]float64{"float64_maxuint64+1": float64(math.MaxUint64 + 1)}, + }, + + // Overflow cases. + { + "v: 4294967297", + map[string]int32{}, + }, { + "v: 128", + map[string]int8{}, + }, + + // Quoted values. + { + "'1': '\"2\"'", + map[interface{}]interface{}{"1": "\"2\""}, + }, { + "v:\n- A\n- 'B\n\n C'\n", + map[string][]string{"v": []string{"A", "B\nC"}}, + }, + + // Explicit tags. + { + "v: !!float '1.1'", + map[string]interface{}{"v": 1.1}, + }, { + "v: !!null ''", + map[string]interface{}{"v": nil}, + }, { + "%TAG !y! tag:yaml.org,2002:\n---\nv: !y!int '1'", + map[string]interface{}{"v": 1}, + }, + + // Anchors and aliases. + { + "a: &x 1\nb: &y 2\nc: *x\nd: *y\n", + &struct{ A, B, C, D int }{1, 2, 1, 2}, + }, { + "a: &a {c: 1}\nb: *a", + &struct { + A, B struct { + C int + } + }{struct{ C int }{1}, struct{ C int }{1}}, + }, { + "a: &a [1, 2]\nb: *a", + &struct{ B []int }{[]int{1, 2}}, + }, { + "b: *a\na: &a {c: 1}", + &struct { + A, B struct { + C int + } + }{struct{ C int }{1}, struct{ C int }{1}}, + }, + + // Bug #1133337 + { + "foo: ''", + map[string]*string{"foo": new(string)}, + }, { + "foo: null", + map[string]string{"foo": ""}, + }, { + "foo: null", + map[string]interface{}{"foo": nil}, + }, + + // Ignored field + { + "a: 1\nb: 2\n", + &struct { + A int + B int "-" + }{1, 0}, + }, + + // Bug #1191981 + { + "" + + "%YAML 1.1\n" + + "--- !!str\n" + + `"Generic line break (no glyph)\n\` + "\n" + + ` Generic line break (glyphed)\n\` + "\n" + + ` Line separator\u2028\` + "\n" + + ` Paragraph separator\u2029"` + "\n", + "" + + "Generic line break (no glyph)\n" + + "Generic line break (glyphed)\n" + + "Line separator\u2028Paragraph separator\u2029", + }, + + // Struct inlining + { + "a: 1\nb: 2\nc: 3\n", + &struct { + A int + C inlineB `yaml:",inline"` + }{1, inlineB{2, inlineC{3}}}, + }, + + // Map inlining + { + "a: 1\nb: 2\nc: 3\n", + &struct { + A int + C map[string]int `yaml:",inline"` + }{1, map[string]int{"b": 2, "c": 3}}, + }, + + // bug 1243827 + { + "a: -b_c", + map[string]interface{}{"a": "-b_c"}, + }, + { + "a: +b_c", + map[string]interface{}{"a": "+b_c"}, + }, + { + "a: 50cent_of_dollar", + map[string]interface{}{"a": "50cent_of_dollar"}, + }, + + // Duration + { + "a: 3s", + map[string]time.Duration{"a": 3 * time.Second}, + }, + + // Issue #24. + { + "a: ", + map[string]string{"a": ""}, + }, + + // Base 60 floats are obsolete and unsupported. + { + "a: 1:1\n", + map[string]string{"a": "1:1"}, + }, + + // Binary data. + { + "a: !!binary gIGC\n", + map[string]string{"a": "\x80\x81\x82"}, + }, { + "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n", + map[string]string{"a": strings.Repeat("\x90", 54)}, + }, { + "a: !!binary |\n " + strings.Repeat("A", 70) + "\n ==\n", + map[string]string{"a": strings.Repeat("\x00", 52)}, + }, + + // Ordered maps. + { + "{b: 2, a: 1, d: 4, c: 3, sub: {e: 5}}", + &yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}}, + }, + + // Issue #39. + { + "a:\n b:\n c: d\n", + map[string]struct{ B interface{} }{"a": {map[interface{}]interface{}{"c": "d"}}}, + }, + + // Custom map type. + { + "a: {b: c}", + M{"a": M{"b": "c"}}, + }, + + // Support encoding.TextUnmarshaler. + { + "a: 1.2.3.4\n", + map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)}, + }, + { + "a: 2015-02-24T18:19:39Z\n", + map[string]time.Time{"a": time.Unix(1424801979, 0).In(time.UTC)}, + }, + + // Encode empty lists as zero-length slices. 
+ { + "a: []", + &struct{ A []int }{[]int{}}, + }, + + // UTF-16-LE + { + "\xff\xfe\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00\n\x00", + M{"ñoño": "very yes"}, + }, + // UTF-16-LE with surrogate. + { + "\xff\xfe\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00 \x00=\xd8\xd4\xdf\n\x00", + M{"ñoño": "very yes 🟔"}, + }, + + // UTF-16-BE + { + "\xfe\xff\x00\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00\n", + M{"ñoño": "very yes"}, + }, + // UTF-16-BE with surrogate. + { + "\xfe\xff\x00\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00 \xd8=\xdf\xd4\x00\n", + M{"ñoño": "very yes 🟔"}, + }, + + // YAML Float regex shouldn't match this + { + "a: 123456e1\n", + M{"a": "123456e1"}, + }, { + "a: 123456E1\n", + M{"a": "123456E1"}, + }, +} + +type M map[interface{}]interface{} + +type inlineB struct { + B int + inlineC `yaml:",inline"` +} + +type inlineC struct { + C int +} + +func (s *S) TestUnmarshal(c *C) { + for _, item := range unmarshalTests { + t := reflect.ValueOf(item.value).Type() + var value interface{} + switch t.Kind() { + case reflect.Map: + value = reflect.MakeMap(t).Interface() + case reflect.String: + value = reflect.New(t).Interface() + case reflect.Ptr: + value = reflect.New(t.Elem()).Interface() + default: + c.Fatalf("missing case for %s", t) + } + err := yaml.Unmarshal([]byte(item.data), value) + if _, ok := err.(*yaml.TypeError); !ok { + c.Assert(err, IsNil) + } + if t.Kind() == reflect.String { + c.Assert(*value.(*string), Equals, item.value) + } else { + c.Assert(value, DeepEquals, item.value) + } + } +} + +func (s *S) TestUnmarshalNaN(c *C) { + value := map[string]interface{}{} + err := yaml.Unmarshal([]byte("notanum: .NaN"), &value) + c.Assert(err, IsNil) + c.Assert(math.IsNaN(value["notanum"].(float64)), Equals, true) +} + +var unmarshalErrorTests = []struct { + data, error string +}{ + {"v: !!float 'error'", "yaml: cannot decode !!str `error` as a !!float"}, + {"v: [A,", "yaml: line 1: did not find expected node content"}, + {"v:\n- [A,", "yaml: line 2: did not find expected node content"}, + {"a: *b\n", "yaml: unknown anchor 'b' referenced"}, + {"a: &a\n b: *a\n", "yaml: anchor 'a' value contains itself"}, + {"value: -", "yaml: block sequence entries are not allowed in this context"}, + {"a: !!binary ==", "yaml: !!binary value contains invalid base64 data"}, + {"{[.]}", `yaml: invalid map key: \[\]interface \{\}\{"\."\}`}, + {"{{.}}", `yaml: invalid map key: map\[interface\ \{\}\]interface \{\}\{".":interface \{\}\(nil\)\}`}, +} + +func (s *S) TestUnmarshalErrors(c *C) { + for _, item := range unmarshalErrorTests { + var value interface{} + err := yaml.Unmarshal([]byte(item.data), &value) + c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value)) + } +} + +var unmarshalerTests = []struct { + data, tag string + value interface{} +}{ + {"_: {hi: there}", "!!map", map[interface{}]interface{}{"hi": "there"}}, + {"_: [1,A]", "!!seq", []interface{}{1, "A"}}, + {"_: 10", "!!int", 10}, + {"_: null", "!!null", nil}, + {`_: BAR!`, "!!str", "BAR!"}, + {`_: "BAR!"`, "!!str", "BAR!"}, + {"_: !!foo 'BAR!'", "!!foo", "BAR!"}, + {`_: ""`, "!!str", ""}, +} + +var unmarshalerResult = map[int]error{} + +type unmarshalerType struct { + value interface{} +} + +func (o *unmarshalerType) UnmarshalYAML(unmarshal func(v interface{}) error) error { + if err := unmarshal(&o.value); err != nil { + return err + } + if i, ok := o.value.(int); ok { + if result, ok 
:= unmarshalerResult[i]; ok { + return result + } + } + return nil +} + +type unmarshalerPointer struct { + Field *unmarshalerType "_" +} + +type unmarshalerValue struct { + Field unmarshalerType "_" +} + +func (s *S) TestUnmarshalerPointerField(c *C) { + for _, item := range unmarshalerTests { + obj := &unmarshalerPointer{} + err := yaml.Unmarshal([]byte(item.data), obj) + c.Assert(err, IsNil) + if item.value == nil { + c.Assert(obj.Field, IsNil) + } else { + c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value)) + c.Assert(obj.Field.value, DeepEquals, item.value) + } + } +} + +func (s *S) TestUnmarshalerValueField(c *C) { + for _, item := range unmarshalerTests { + obj := &unmarshalerValue{} + err := yaml.Unmarshal([]byte(item.data), obj) + c.Assert(err, IsNil) + c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value)) + c.Assert(obj.Field.value, DeepEquals, item.value) + } +} + +func (s *S) TestUnmarshalerWholeDocument(c *C) { + obj := &unmarshalerType{} + err := yaml.Unmarshal([]byte(unmarshalerTests[0].data), obj) + c.Assert(err, IsNil) + value, ok := obj.value.(map[interface{}]interface{}) + c.Assert(ok, Equals, true, Commentf("value: %#v", obj.value)) + c.Assert(value["_"], DeepEquals, unmarshalerTests[0].value) +} + +func (s *S) TestUnmarshalerTypeError(c *C) { + unmarshalerResult[2] = &yaml.TypeError{[]string{"foo"}} + unmarshalerResult[4] = &yaml.TypeError{[]string{"bar"}} + defer func() { + delete(unmarshalerResult, 2) + delete(unmarshalerResult, 4) + }() + + type T struct { + Before int + After int + M map[string]*unmarshalerType + } + var v T + data := `{before: A, m: {abc: 1, def: 2, ghi: 3, jkl: 4}, after: B}` + err := yaml.Unmarshal([]byte(data), &v) + c.Assert(err, ErrorMatches, ""+ + "yaml: unmarshal errors:\n"+ + " line 1: cannot unmarshal !!str `A` into int\n"+ + " foo\n"+ + " bar\n"+ + " line 1: cannot unmarshal !!str `B` into int") + c.Assert(v.M["abc"], NotNil) + c.Assert(v.M["def"], IsNil) + c.Assert(v.M["ghi"], NotNil) + c.Assert(v.M["jkl"], IsNil) + + c.Assert(v.M["abc"].value, Equals, 1) + c.Assert(v.M["ghi"].value, Equals, 3) +} + +type proxyTypeError struct{} + +func (v *proxyTypeError) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + var a int32 + var b int64 + if err := unmarshal(&s); err != nil { + panic(err) + } + if s == "a" { + if err := unmarshal(&b); err == nil { + panic("should have failed") + } + return unmarshal(&a) + } + if err := unmarshal(&a); err == nil { + panic("should have failed") + } + return unmarshal(&b) +} + +func (s *S) TestUnmarshalerTypeErrorProxying(c *C) { + type T struct { + Before int + After int + M map[string]*proxyTypeError + } + var v T + data := `{before: A, m: {abc: a, def: b}, after: B}` + err := yaml.Unmarshal([]byte(data), &v) + c.Assert(err, ErrorMatches, ""+ + "yaml: unmarshal errors:\n"+ + " line 1: cannot unmarshal !!str `A` into int\n"+ + " line 1: cannot unmarshal !!str `a` into int32\n"+ + " line 1: cannot unmarshal !!str `b` into int64\n"+ + " line 1: cannot unmarshal !!str `B` into int") +} + +type failingUnmarshaler struct{} + +var failingErr = errors.New("failingErr") + +func (ft *failingUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error { + return failingErr +} + +func (s *S) TestUnmarshalerError(c *C) { + err := yaml.Unmarshal([]byte("a: b"), &failingUnmarshaler{}) + c.Assert(err, Equals, failingErr) +} + +type sliceUnmarshaler []int + +func (su *sliceUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) 
error { + var slice []int + err := unmarshal(&slice) + if err == nil { + *su = slice + return nil + } + + var intVal int + err = unmarshal(&intVal) + if err == nil { + *su = []int{intVal} + return nil + } + + return err +} + +func (s *S) TestUnmarshalerRetry(c *C) { + var su sliceUnmarshaler + err := yaml.Unmarshal([]byte("[1, 2, 3]"), &su) + c.Assert(err, IsNil) + c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1, 2, 3})) + + err = yaml.Unmarshal([]byte("1"), &su) + c.Assert(err, IsNil) + c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1})) +} + +// From http://yaml.org/type/merge.html +var mergeTests = ` +anchors: + list: + - &CENTER { "x": 1, "y": 2 } + - &LEFT { "x": 0, "y": 2 } + - &BIG { "r": 10 } + - &SMALL { "r": 1 } + +# All the following maps are equal: + +plain: + # Explicit keys + "x": 1 + "y": 2 + "r": 10 + label: center/big + +mergeOne: + # Merge one map + << : *CENTER + "r": 10 + label: center/big + +mergeMultiple: + # Merge multiple maps + << : [ *CENTER, *BIG ] + label: center/big + +override: + # Override + << : [ *BIG, *LEFT, *SMALL ] + "x": 1 + label: center/big + +shortTag: + # Explicit short merge tag + !!merge "<<" : [ *CENTER, *BIG ] + label: center/big + +longTag: + # Explicit merge long tag + ! "<<" : [ *CENTER, *BIG ] + label: center/big + +inlineMap: + # Inlined map + << : {"x": 1, "y": 2, "r": 10} + label: center/big + +inlineSequenceMap: + # Inlined map in sequence + << : [ *CENTER, {"r": 10} ] + label: center/big +` + +func (s *S) TestMerge(c *C) { + var want = map[interface{}]interface{}{ + "x": 1, + "y": 2, + "r": 10, + "label": "center/big", + } + + var m map[interface{}]interface{} + err := yaml.Unmarshal([]byte(mergeTests), &m) + c.Assert(err, IsNil) + for name, test := range m { + if name == "anchors" { + continue + } + c.Assert(test, DeepEquals, want, Commentf("test %q failed", name)) + } +} + +func (s *S) TestMergeStruct(c *C) { + type Data struct { + X, Y, R int + Label string + } + want := Data{1, 2, 10, "center/big"} + + var m map[string]Data + err := yaml.Unmarshal([]byte(mergeTests), &m) + c.Assert(err, IsNil) + for name, test := range m { + if name == "anchors" { + continue + } + c.Assert(test, Equals, want, Commentf("test %q failed", name)) + } +} + +var unmarshalNullTests = []func() interface{}{ + func() interface{} { var v interface{}; v = "v"; return &v }, + func() interface{} { var s = "s"; return &s }, + func() interface{} { var s = "s"; sptr := &s; return &sptr }, + func() interface{} { var i = 1; return &i }, + func() interface{} { var i = 1; iptr := &i; return &iptr }, + func() interface{} { m := map[string]int{"s": 1}; return &m }, + func() interface{} { m := map[string]int{"s": 1}; return m }, +} + +func (s *S) TestUnmarshalNull(c *C) { + for _, test := range unmarshalNullTests { + item := test() + zero := reflect.Zero(reflect.TypeOf(item).Elem()).Interface() + err := yaml.Unmarshal([]byte("null"), item) + c.Assert(err, IsNil) + if reflect.TypeOf(item).Kind() == reflect.Map { + c.Assert(reflect.ValueOf(item).Interface(), DeepEquals, reflect.MakeMap(reflect.TypeOf(item)).Interface()) + } else { + c.Assert(reflect.ValueOf(item).Elem().Interface(), DeepEquals, zero) + } + } +} + +func (s *S) TestUnmarshalSliceOnPreset(c *C) { + // Issue #48. 
+ v := struct{ A []int }{[]int{1}} + yaml.Unmarshal([]byte("a: [2]"), &v) + c.Assert(v.A, DeepEquals, []int{2}) +} + +//var data []byte +//func init() { +// var err error +// data, err = ioutil.ReadFile("/tmp/file.yaml") +// if err != nil { +// panic(err) +// } +//} +// +//func (s *S) BenchmarkUnmarshal(c *C) { +// var err error +// for i := 0; i < c.N; i++ { +// var v map[string]interface{} +// err = yaml.Unmarshal(data, &v) +// } +// if err != nil { +// panic(err) +// } +//} +// +//func (s *S) BenchmarkMarshal(c *C) { +// var v map[string]interface{} +// yaml.Unmarshal(data, &v) +// c.ResetTimer() +// for i := 0; i < c.N; i++ { +// yaml.Marshal(&v) +// } +//} diff --git a/vendor/github.com/go-yaml/yaml/emitterc.go b/vendor/github.com/go-yaml/yaml/emitterc.go new file mode 100644 index 0000000000..6ecdcb3c7f --- /dev/null +++ b/vendor/github.com/go-yaml/yaml/emitterc.go @@ -0,0 +1,1684 @@ +package yaml + +import ( + "bytes" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. 
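All of the write helpers above share one discipline: before touching the buffer, make sure at least 5 spare bytes remain, enough for a 4-byte UTF-8 sequence or a CRLF break, so no per-byte bounds check is needed afterwards. A toy model of that pattern (the standalone names here are assumptions, not the vendored emitter):

```go
// toyBuffer mimics the emitter's put/flush discipline: flush when fewer than
// 5 bytes of headroom remain, then write unconditionally.
type toyBuffer struct {
	buf []byte // fixed-size staging area, like emitter.buffer
	pos int
	out []byte // stands in for the real output writer
}

func (b *toyBuffer) put(c byte) {
	if b.pos+5 >= len(b.buf) {
		b.out = append(b.out, b.buf[:b.pos]...) // flush
		b.pos = 0
	}
	b.buf[b.pos] = c
	b.pos++
}
```

The yaml_emitter_emit function that follows drains queued events through these helpers.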
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. 
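The indent bookkeeping in yaml_emitter_increase_indent above is a plain stack: push the current indent, then widen it by best_indent, with the first block level landing at column 0. A runnable toy replay of the non-flow path, using assumed standalone names:

```go
package main

import "fmt"

func main() {
	const bestIndent = 2 // the emitter's default, set at stream start
	indent := -1
	var stack []int

	increase := func() {
		stack = append(stack, indent)
		if indent < 0 {
			indent = 0 // first block level
		} else {
			indent += bestIndent
		}
	}
	decrease := func() {
		indent = stack[len(stack)-1]
		stack = stack[:len(stack)-1]
	}

	increase()          // enter root mapping
	increase()          // enter nested mapping
	fmt.Println(indent) // 2
	decrease()
	fmt.Println(indent) // 0
}
```

The state dispatcher that follows routes each event to the per-state emit functions, which push and pop this indent stack as nesting opens and closes.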
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. +func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. 
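The defaults pinned down at stream start above (a best_indent of 2, a best width of 80) are what the package's public Marshal entry point inherits. A small user-level example; the exact output is a best guess, since key order comes from the encoder's sorting:

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	out, err := yaml.Marshal(map[string]interface{}{
		"name": "dep",
		"deps": []string{"yaml", "toml"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Likely output, using the two-space block defaults:
	// deps:
	// - yaml
	// - toml
	// name: dep
}
```

Next, the document-start handler writes any %YAML and %TAG directives plus the --- marker before handing off to the content states.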
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. 
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. +func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. 
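These flow states are reachable from user code through the ",flow" struct-tag option, which requests flow style for a field. A hedged example, assuming the v2 tag syntax:

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

type styles struct {
	Block []int `yaml:"block"`
	Flow  []int `yaml:"flow,flow"` // ",flow" routes through the flow states above
}

func main() {
	out, _ := yaml.Marshal(styles{Block: []int{1, 2}, Flow: []int{1, 2}})
	fmt.Print(string(out))
	// Likely output:
	// block:
	// - 1
	// - 2
	// flow: [1, 2]
}
```

The flow value handler that follows writes the ':' indicator for both the simple and the explicit-key ('?') forms.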
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. +func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. 
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS") + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. +func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. 
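+// As with the sequence check above, this peeks at the queued events: a
+// MAPPING-START followed immediately by MAPPING-END lets the emitter
+// choose the compact flow form. Observable behaviour, as a sketch:
+//
+//	data, _ := yaml.Marshal(struct{}{})
+//	// string(data) == "{}\n"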
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. +func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. 
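+// A tag is emitted either as handle plus suffix (the handle "!!" with
+// suffix "str" renders as "!!str") or, when no %TAG directive matches,
+// verbatim between "!<" and ">". Illustrative YAML, assuming a made-up
+// tag URI:
+//
+//	a: !!str 123
+//	b: !<tag:example.com,2000:app/foo> bar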
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. +func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. 
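+// Anchor and alias names must be non-empty and purely alphanumeric; the
+// validated data is stashed on the emitter for the node about to be
+// written. In a document, anchor and alias pair up as in this sketch:
+//
+//	defaults: &base {retries: 3}
+//	service: *base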
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. +func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceeded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceeded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceeded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceeded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
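+// Event analysis fills in the anchor, tag, and scalar data that the
+// emitter consults later; in particular the scalar analysis above feeds
+// yaml_emitter_select_scalar_style, where line breaks rule out the plain
+// style and push multi-line strings into a block literal. For example,
+// matching the encode tests added in encode_test.go below:
+//
+//	data, _ := yaml.Marshal(map[string][]string{"v": {"A", "B\nC"}})
+//	// string(data) == "v:\n- A\n- |-\n B\n C\n"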
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. +func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } 
else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, 
rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if 
!yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/vendor/github.com/go-yaml/yaml/encode.go b/vendor/github.com/go-yaml/yaml/encode.go new file mode 100644 index 0000000000..84f8499551 --- /dev/null +++ b/vendor/github.com/go-yaml/yaml/encode.go @@ -0,0 +1,306 @@ +package yaml + +import ( + "encoding" + "fmt" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool +} + +func newEncoder() (e *encoder) { + e = &encoder{} + e.must(yaml_emitter_initialize(&e.emitter)) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)) + e.emit() + e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true)) + e.emit() + return e +} + +func (e *encoder) finish() { + e.must(yaml_document_end_event_initialize(&e.event, true)) + e.emit() + e.emitter.open_ended = false + e.must(yaml_stream_end_event_initialize(&e.event)) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. 
+ if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT { + e.must(false) + } +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + if !in.IsValid() { + e.nilv() + return + } + iface := in.Interface() + if m, ok := iface.(Marshaler); ok { + v, err := m.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + in = reflect.ValueOf(v) + } else if m, ok := iface.(encoding.TextMarshaler); ok { + text, err := m.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + } + switch in.Kind() { + case reflect.Interface: + if in.IsNil() { + e.nilv() + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + if in.IsNil() { + e.nilv() + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Struct: + e.structv(tag, in) + case reflect.Slice: + if in.Type().Elem() == mapItemType { + e.itemsv(tag, in) + } else { + e.slicev(tag, in) + } + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if in.Type() == durationType { + e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) + } else { + e.intv(tag, in) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) itemsv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) + for _, item := range slice { + e.marshal("", reflect.ValueOf(item.Key)) + e.marshal("", reflect.ValueOf(item.Value)) + } + }) +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = in.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + f() + e.must(yaml_mapping_end_event_initialize(&e.event)) + e.emit() +} + +func (e *encoder) slicev(tag 
string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + rtag, rs := resolve("", s) + if rtag == yaml_BINARY_TAG { + if tag == "" || tag == yaml_STR_TAG { + tag = rtag + s = rs.(string) + } else if tag == yaml_BINARY_TAG { + failf("explicitly tagged !!binary data must be base64-encoded") + } else { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + } + if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } else if strings.Contains(s, "\n") { + style = yaml_LITERAL_SCALAR_STYLE + } else { + style = yaml_PLAIN_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // FIXME: Handle 64 bits here. + s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { + implicit := tag == "" + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.emit() +} diff --git a/vendor/github.com/go-yaml/yaml/encode_test.go b/vendor/github.com/go-yaml/yaml/encode_test.go new file mode 100644 index 0000000000..84099bd385 --- /dev/null +++ b/vendor/github.com/go-yaml/yaml/encode_test.go @@ -0,0 +1,501 @@ +package yaml_test + +import ( + "fmt" + "math" + "strconv" + "strings" + "time" + + . 
"gopkg.in/check.v1" + "gopkg.in/yaml.v2" + "net" + "os" +) + +var marshalIntTest = 123 + +var marshalTests = []struct { + value interface{} + data string +}{ + { + nil, + "null\n", + }, { + &struct{}{}, + "{}\n", + }, { + map[string]string{"v": "hi"}, + "v: hi\n", + }, { + map[string]interface{}{"v": "hi"}, + "v: hi\n", + }, { + map[string]string{"v": "true"}, + "v: \"true\"\n", + }, { + map[string]string{"v": "false"}, + "v: \"false\"\n", + }, { + map[string]interface{}{"v": true}, + "v: true\n", + }, { + map[string]interface{}{"v": false}, + "v: false\n", + }, { + map[string]interface{}{"v": 10}, + "v: 10\n", + }, { + map[string]interface{}{"v": -10}, + "v: -10\n", + }, { + map[string]uint{"v": 42}, + "v: 42\n", + }, { + map[string]interface{}{"v": int64(4294967296)}, + "v: 4294967296\n", + }, { + map[string]int64{"v": int64(4294967296)}, + "v: 4294967296\n", + }, { + map[string]uint64{"v": 4294967296}, + "v: 4294967296\n", + }, { + map[string]interface{}{"v": "10"}, + "v: \"10\"\n", + }, { + map[string]interface{}{"v": 0.1}, + "v: 0.1\n", + }, { + map[string]interface{}{"v": float64(0.1)}, + "v: 0.1\n", + }, { + map[string]interface{}{"v": -0.1}, + "v: -0.1\n", + }, { + map[string]interface{}{"v": math.Inf(+1)}, + "v: .inf\n", + }, { + map[string]interface{}{"v": math.Inf(-1)}, + "v: -.inf\n", + }, { + map[string]interface{}{"v": math.NaN()}, + "v: .nan\n", + }, { + map[string]interface{}{"v": nil}, + "v: null\n", + }, { + map[string]interface{}{"v": ""}, + "v: \"\"\n", + }, { + map[string][]string{"v": []string{"A", "B"}}, + "v:\n- A\n- B\n", + }, { + map[string][]string{"v": []string{"A", "B\nC"}}, + "v:\n- A\n- |-\n B\n C\n", + }, { + map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}}, + "v:\n- A\n- 1\n- B:\n - 2\n - 3\n", + }, { + map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}}, + "a:\n b: c\n", + }, { + map[string]interface{}{"a": "-"}, + "a: '-'\n", + }, + + // Simple values. 
+ { + &marshalIntTest, + "123\n", + }, + + // Structures + { + &struct{ Hello string }{"world"}, + "hello: world\n", + }, { + &struct { + A struct { + B string + } + }{struct{ B string }{"c"}}, + "a:\n b: c\n", + }, { + &struct { + A *struct { + B string + } + }{&struct{ B string }{"c"}}, + "a:\n b: c\n", + }, { + &struct { + A *struct { + B string + } + }{}, + "a: null\n", + }, { + &struct{ A int }{1}, + "a: 1\n", + }, { + &struct{ A []int }{[]int{1, 2}}, + "a:\n- 1\n- 2\n", + }, { + &struct { + B int "a" + }{1}, + "a: 1\n", + }, { + &struct{ A bool }{true}, + "a: true\n", + }, + + // Conditional flag + { + &struct { + A int "a,omitempty" + B int "b,omitempty" + }{1, 0}, + "a: 1\n", + }, { + &struct { + A int "a,omitempty" + B int "b,omitempty" + }{0, 0}, + "{}\n", + }, { + &struct { + A *struct{ X, y int } "a,omitempty,flow" + }{&struct{ X, y int }{1, 2}}, + "a: {x: 1}\n", + }, { + &struct { + A *struct{ X, y int } "a,omitempty,flow" + }{nil}, + "{}\n", + }, { + &struct { + A *struct{ X, y int } "a,omitempty,flow" + }{&struct{ X, y int }{}}, + "a: {x: 0}\n", + }, { + &struct { + A struct{ X, y int } "a,omitempty,flow" + }{struct{ X, y int }{1, 2}}, + "a: {x: 1}\n", + }, { + &struct { + A struct{ X, y int } "a,omitempty,flow" + }{struct{ X, y int }{0, 1}}, + "{}\n", + }, { + &struct { + A float64 "a,omitempty" + B float64 "b,omitempty" + }{1, 0}, + "a: 1\n", + }, + + // Flow flag + { + &struct { + A []int "a,flow" + }{[]int{1, 2}}, + "a: [1, 2]\n", + }, { + &struct { + A map[string]string "a,flow" + }{map[string]string{"b": "c", "d": "e"}}, + "a: {b: c, d: e}\n", + }, { + &struct { + A struct { + B, D string + } "a,flow" + }{struct{ B, D string }{"c", "e"}}, + "a: {b: c, d: e}\n", + }, + + // Unexported field + { + &struct { + u int + A int + }{0, 1}, + "a: 1\n", + }, + + // Ignored field + { + &struct { + A int + B int "-" + }{1, 2}, + "a: 1\n", + }, + + // Struct inlining + { + &struct { + A int + C inlineB `yaml:",inline"` + }{1, inlineB{2, inlineC{3}}}, + "a: 1\nb: 2\nc: 3\n", + }, + + // Map inlining + { + &struct { + A int + C map[string]int `yaml:",inline"` + }{1, map[string]int{"b": 2, "c": 3}}, + "a: 1\nb: 2\nc: 3\n", + }, + + // Duration + { + map[string]time.Duration{"a": 3 * time.Second}, + "a: 3s\n", + }, + + // Issue #24: bug in map merging logic. + { + map[string]string{"a": ""}, + "a: \n", + }, + + // Issue #34: marshal unsupported base 60 floats quoted for compatibility + // with old YAML 1.1 parsers. + { + map[string]string{"a": "1:1"}, + "a: \"1:1\"\n", + }, + + // Binary data. + { + map[string]string{"a": "\x00"}, + "a: \"\\0\"\n", + }, { + map[string]string{"a": "\x80\x81\x82"}, + "a: !!binary gIGC\n", + }, { + map[string]string{"a": strings.Repeat("\x90", 54)}, + "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n", + }, + + // Ordered maps. + { + &yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}}, + "b: 2\na: 1\nd: 4\nc: 3\nsub:\n e: 5\n", + }, + + // Encode unicode as utf-8 rather than in escaped form. + { + map[string]string{"a": "你好"}, + "a: 你好\n", + }, + + // Support encoding.TextMarshaler. + { + map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)}, + "a: 1.2.3.4\n", + }, + { + map[string]time.Time{"a": time.Unix(1424801979, 0)}, + "a: 2015-02-24T18:19:39Z\n", + }, + + // Ensure strings containing ": " are quoted (reported as PR #43, but not reproducible). 
+ { + map[string]string{"a": "b: c"}, + "a: 'b: c'\n", + }, + + // Containing hash mark ('#') in string should be quoted + { + map[string]string{"a": "Hello #comment"}, + "a: 'Hello #comment'\n", + }, + { + map[string]string{"a": "你好 #comment"}, + "a: '你好 #comment'\n", + }, +} + +func (s *S) TestMarshal(c *C) { + defer os.Setenv("TZ", os.Getenv("TZ")) + os.Setenv("TZ", "UTC") + for _, item := range marshalTests { + data, err := yaml.Marshal(item.value) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, item.data) + } +} + +var marshalErrorTests = []struct { + value interface{} + error string + panic string +}{{ + value: &struct { + B int + inlineB ",inline" + }{1, inlineB{2, inlineC{3}}}, + panic: `Duplicated key 'b' in struct struct \{ B int; .*`, +}, { + value: &struct { + A int + B map[string]int ",inline" + }{1, map[string]int{"a": 2}}, + panic: `Can't have key "a" in inlined map; conflicts with struct field`, +}} + +func (s *S) TestMarshalErrors(c *C) { + for _, item := range marshalErrorTests { + if item.panic != "" { + c.Assert(func() { yaml.Marshal(item.value) }, PanicMatches, item.panic) + } else { + _, err := yaml.Marshal(item.value) + c.Assert(err, ErrorMatches, item.error) + } + } +} + +func (s *S) TestMarshalTypeCache(c *C) { + var data []byte + var err error + func() { + type T struct{ A int } + data, err = yaml.Marshal(&T{}) + c.Assert(err, IsNil) + }() + func() { + type T struct{ B int } + data, err = yaml.Marshal(&T{}) + c.Assert(err, IsNil) + }() + c.Assert(string(data), Equals, "b: 0\n") +} + +var marshalerTests = []struct { + data string + value interface{} +}{ + {"_:\n hi: there\n", map[interface{}]interface{}{"hi": "there"}}, + {"_:\n- 1\n- A\n", []interface{}{1, "A"}}, + {"_: 10\n", 10}, + {"_: null\n", nil}, + {"_: BAR!\n", "BAR!"}, +} + +type marshalerType struct { + value interface{} +} + +func (o marshalerType) MarshalText() ([]byte, error) { + panic("MarshalText called on type with MarshalYAML") +} + +func (o marshalerType) MarshalYAML() (interface{}, error) { + return o.value, nil +} + +type marshalerValue struct { + Field marshalerType "_" +} + +func (s *S) TestMarshaler(c *C) { + for _, item := range marshalerTests { + obj := &marshalerValue{} + obj.Field.value = item.value + data, err := yaml.Marshal(obj) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, string(item.data)) + } +} + +func (s *S) TestMarshalerWholeDocument(c *C) { + obj := &marshalerType{} + obj.value = map[string]string{"hello": "world!"} + data, err := yaml.Marshal(obj) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, "hello: world!\n") +} + +type failingMarshaler struct{} + +func (ft *failingMarshaler) MarshalYAML() (interface{}, error) { + return nil, failingErr +} + +func (s *S) TestMarshalerError(c *C) { + _, err := yaml.Marshal(&failingMarshaler{}) + c.Assert(err, Equals, failingErr) +} + +func (s *S) TestSortedOutput(c *C) { + order := []interface{}{ + false, + true, + 1, + uint(1), + 1.0, + 1.1, + 1.2, + 2, + uint(2), + 2.0, + 2.1, + "", + ".1", + ".2", + ".a", + "1", + "2", + "a!10", + "a/2", + "a/10", + "a~10", + "ab/1", + "b/1", + "b/01", + "b/2", + "b/02", + "b/3", + "b/03", + "b1", + "b01", + "b3", + "c2.10", + "c10.2", + "d1", + "d12", + "d12a", + } + m := make(map[interface{}]int) + for _, k := range order { + m[k] = 1 + } + data, err := yaml.Marshal(m) + c.Assert(err, IsNil) + out := "\n" + string(data) + last := 0 + for i, k := range order { + repr := fmt.Sprint(k) + if s, ok := k.(string); ok { + if _, err = strconv.ParseFloat(repr, 32); s == "" || err == 
nil { + repr = `"` + repr + `"` + } + } + index := strings.Index(out, "\n"+repr+":") + if index == -1 { + c.Fatalf("%#v is not in the output: %#v", k, out) + } + if index < last { + c.Fatalf("%#v was generated before %#v: %q", k, order[i-1], out) + } + last = index + } +} diff --git a/vendor/github.com/go-yaml/yaml/parserc.go b/vendor/github.com/go-yaml/yaml/parserc.go new file mode 100644 index 0000000000..81d05dfe57 --- /dev/null +++ b/vendor/github.com/go-yaml/yaml/parserc.go @@ -0,0 +1,1095 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. 
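+// This helper and the context-aware variant below record the problem and
+// its position marks on the parser and return false; the failure then
+// unwinds through the bool-returning parse functions and surfaces as an
+// error from the public API. Sketch:
+//
+//	var v interface{}
+//	err := yaml.Unmarshal([]byte("{a: 1"), &v)
+//	// err != nil: the unterminated flow mapping is reported with its mark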
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. +func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END +// ************ +func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_STREAM_START_TOKEN { + return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark) + } + parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + encoding: token.encoding, + } + skip_token(parser) + return true +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// * +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// ************************* +func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { + + token := peek_token(parser) + if token == nil { + return false + } + + // Parse extra document end indicators. + if !implicit { + for token.typ == yaml_DOCUMENT_END_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && + token.typ != yaml_TAG_DIRECTIVE_TOKEN && + token.typ != yaml_DOCUMENT_START_TOKEN && + token.typ != yaml_STREAM_END_TOKEN { + // Parse an implicit document. + if !yaml_parser_process_directives(parser, nil, nil) { + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_BLOCK_NODE_STATE + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + } else if token.typ != yaml_STREAM_END_TOKEN { + // Parse an explicit document. + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + start_mark := token.start_mark + if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { + return false + } + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_DOCUMENT_START_TOKEN { + yaml_parser_set_parser_error(parser, + "did not find expected <document start>", token.start_mark) + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE + end_mark := token.end_mark + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: false, + } + skip_token(parser) + + } else { + // Parse the stream end. + parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? 
+// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. 
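+// An empty scalar stands in for an omitted node (for example, the missing
+// value in "key:"). Both marks point at the same position, so it spans no input.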
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. +func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/github.com/go-yaml/yaml/readerc.go b/vendor/github.com/go-yaml/yaml/readerc.go new file mode 100644 index 0000000000..f450791717 --- /dev/null +++ b/vendor/github.com/go-yaml/yaml/readerc.go @@ -0,0 +1,394 @@ +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. 
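+// These BOM sequences are matched against the head of the raw buffer; when one
+// matches, the corresponding encoding is selected and the BOM bytes are skipped.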
+const (
+	bom_UTF8    = "\xef\xbb\xbf"
+	bom_UTF16LE = "\xff\xfe"
+	bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Returns true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+	// Ensure that we have enough bytes in the raw buffer.
+	for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+		if !yaml_parser_update_raw_buffer(parser) {
+			return false
+		}
+	}
+
+	// Determine the encoding.
+	buf := parser.raw_buffer
+	pos := parser.raw_buffer_pos
+	avail := len(buf) - pos
+	if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+		parser.encoding = yaml_UTF16LE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+		parser.encoding = yaml_UTF16BE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+		parser.encoding = yaml_UTF8_ENCODING
+		parser.raw_buffer_pos += 3
+		parser.offset += 3
+	} else {
+		parser.encoding = yaml_UTF8_ENCODING
+	}
+	return true
+}
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+	size_read := 0
+
+	// Return if the raw buffer is full.
+	if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+		return true
+	}
+
+	// Return on EOF.
+	if parser.eof {
+		return true
+	}
+
+	// Move the remaining bytes in the raw buffer to the beginning.
+	if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+		copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+	}
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+	parser.raw_buffer_pos = 0
+
+	// Call the read handler to fill the buffer.
+	size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+	if err == io.EOF {
+		parser.eof = true
+	} else if err != nil {
+		return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+	}
+	return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+	if parser.read_handler == nil {
+		panic("read handler must be set")
+	}
+
+	// If the EOF flag is set and the raw buffer is empty, do nothing.
+	if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+		return true
+	}
+
+	// Return if the buffer contains enough characters.
+	if parser.unread >= length {
+		return true
+	}
+
+	// Determine the input encoding if it is not known yet.
+	if parser.encoding == yaml_ANY_ENCODING {
+		if !yaml_parser_determine_encoding(parser) {
+			return false
+		}
+	}
+
+	// Move the unread characters to the beginning of the buffer.
+	buffer_len := len(parser.buffer)
+	if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+		copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+		buffer_len -= parser.buffer_pos
+		parser.buffer_pos = 0
+	} else if parser.buffer_pos == buffer_len {
+		buffer_len = 0
+		parser.buffer_pos = 0
+	}
+
+	// Open the whole buffer for writing, and cut it before returning.
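+	// (Reslicing to cap() below exposes the buffer's spare capacity for writing
+	// without allocating; the length is trimmed back before returning.)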
+ parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). 
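+			// (As a worked check of the formulas below: U+1F600 has
+			// U' = 0xF600, giving W1 = 0xD83D and W2 = 0xDE00.)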
+ // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. + parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 
11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/github.com/go-yaml/yaml/resolve.go b/vendor/github.com/go-yaml/yaml/resolve.go new file mode 100644 index 0000000000..232313cc08 --- /dev/null +++ b/vendor/github.com/go-yaml/yaml/resolve.go @@ -0,0 +1,208 @@ +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "unicode/utf8" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' // Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, + {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, + {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, + {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, + {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, + {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, + {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", yaml_MERGE_TAG, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + // TODO This can easily be made faster and produce less garbage. + if strings.HasPrefix(tag, longTagPrefix) { + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out interface{}) { + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: + return + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { + // Handle things we can lookup in a map. 
+		if item, ok := resolveMap[in]; ok {
+			return item.tag, item.value
+		}
+
+		// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+		// are purposefully unsupported here. They're still quoted on
+		// the way out for compatibility with other parsers, though.
+
+		switch hint {
+		case 'M':
+			// We've already checked the map above.
+
+		case '.':
+			// Not in the map, so maybe a normal float.
+			floatv, err := strconv.ParseFloat(in, 64)
+			if err == nil {
+				return yaml_FLOAT_TAG, floatv
+			}
+
+		case 'D', 'S':
+			// Int, float, or timestamp.
+			plain := strings.Replace(in, "_", "", -1)
+			intv, err := strconv.ParseInt(plain, 0, 64)
+			if err == nil {
+				if intv == int64(int(intv)) {
+					return yaml_INT_TAG, int(intv)
+				} else {
+					return yaml_INT_TAG, intv
+				}
+			}
+			uintv, err := strconv.ParseUint(plain, 0, 64)
+			if err == nil {
+				return yaml_INT_TAG, uintv
+			}
+			if yamlStyleFloat.MatchString(plain) {
+				floatv, err := strconv.ParseFloat(plain, 64)
+				if err == nil {
+					return yaml_FLOAT_TAG, floatv
+				}
+			}
+			if strings.HasPrefix(plain, "0b") {
+				intv, err := strconv.ParseInt(plain[2:], 2, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return yaml_INT_TAG, int(intv)
+					} else {
+						return yaml_INT_TAG, intv
+					}
+				}
+				uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+				if err == nil {
+					return yaml_INT_TAG, uintv
+				}
+			} else if strings.HasPrefix(plain, "-0b") {
+				intv, err := strconv.ParseInt(plain[3:], 2, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return yaml_INT_TAG, -int(intv)
+					} else {
+						return yaml_INT_TAG, -intv
+					}
+				}
+			}
+			// XXX Handle timestamps here.
+
+		default:
+			panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
+		}
+	}
+	if tag == yaml_BINARY_TAG {
+		return yaml_BINARY_TAG, in
+	}
+	if utf8.ValidString(in) {
+		return yaml_STR_TAG, in
+	}
+	return yaml_BINARY_TAG, encodeBase64(in)
+}
+
+// encodeBase64 encodes s as base64, broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+	const lineLen = 70
+	encLen := base64.StdEncoding.EncodedLen(len(s))
+	lines := encLen/lineLen + 1
+	buf := make([]byte, encLen*2+lines)
+	in := buf[0:encLen]
+	out := buf[encLen:]
+	base64.StdEncoding.Encode(in, []byte(s))
+	k := 0
+	for i := 0; i < len(in); i += lineLen {
+		j := i + lineLen
+		if j > len(in) {
+			j = len(in)
+		}
+		k += copy(out[k:], in[i:j])
+		if lines > 1 {
+			out[k] = '\n'
+			k++
+		}
+	}
+	return string(out[:k])
+}
diff --git a/vendor/github.com/go-yaml/yaml/scannerc.go b/vendor/github.com/go-yaml/yaml/scannerc.go
new file mode 100644
index 0000000000..2c9d5111f9
--- /dev/null
+++ b/vendor/github.com/go-yaml/yaml/scannerc.go
@@ -0,0 +1,2710 @@
+package yaml
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive descent parser (or an
+// LL(1) parser, as it is usually called).
+//
+// Actually, there are two aspects of Scanning that might be called "clever";
+// the rest is quite straightforward. They are "block collection start" and
+// "simple keys". Both are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+//      STREAM-START(encoding)          # The stream start.
+//      STREAM-END                      # The stream end.
+//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
+//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
+//      DOCUMENT-START                  # '---'
+//      DOCUMENT-END                    # '...'
+//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
+//      BLOCK-MAPPING-START             # sequence or a block mapping.
+//      BLOCK-END                       # Indentation decrease.
+//      FLOW-SEQUENCE-START             # '['
+//      FLOW-SEQUENCE-END               # ']'
+//      FLOW-MAPPING-START              # '{'
+//      FLOW-MAPPING-END                # '}'
+//      BLOCK-ENTRY                     # '-'
+//      FLOW-ENTRY                      # ','
+//      KEY                             # '?' or nothing (simple keys).
+//      VALUE                           # ':'
+//      ALIAS(anchor)                   # '*anchor'
+//      ANCHOR(anchor)                  # '&anchor'
+//      TAG(handle,suffix)              # '!handle!suffix'
+//      SCALAR(value,style)             # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+//      STREAM-START(encoding)
+//      STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+//      VERSION-DIRECTIVE(major,minor)
+//      TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+//      %YAML   1.1
+//      %TAG    !       !foo
+//      %TAG    !yaml!  tag:yaml.org,2002:
+//      ---
+//
+// The corresponding sequence of tokens:
+//
+//      STREAM-START(utf-8)
+//      VERSION-DIRECTIVE(1,1)
+//      TAG-DIRECTIVE("!","!foo")
+//      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+//      DOCUMENT-START
+//      STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+//      DOCUMENT-START
+//      DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+//      'a scalar'
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      SCALAR("a scalar",single-quoted)
+//      STREAM-END
+//
+// 2. An explicit document:
+//
+//      ---
+//      'a scalar'
+//      ...
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      DOCUMENT-START
+//      SCALAR("a scalar",single-quoted)
+//      DOCUMENT-END
+//      STREAM-END
+//
+// 3. Several documents in a stream:
+//
+//      'a scalar'
+//      ---
+//      'another scalar'
+//      ---
+//      'yet another scalar'
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      SCALAR("a scalar",single-quoted)
+//      DOCUMENT-START
+//      SCALAR("another scalar",single-quoted)
+//      DOCUMENT-START
+//      SCALAR("yet another scalar",single-quoted)
+//      STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+//      ALIAS(anchor)
+//      ANCHOR(anchor)
+//      TAG(handle,suffix)
+//      SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+//      &A [ *A ]
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      ANCHOR("A")
+//      FLOW-SEQUENCE-START
+//      ALIAS("A")
+//      FLOW-SEQUENCE-END
+//      STREAM-END
+//
+// 2. A tagged scalar:
+//
+//      !!float "3.14"  # A good approximation.
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      TAG("!!","float")
+//      SCALAR("3.14",double-quoted)
+//      STREAM-END
+//
+// 3. Various scalar styles:
+//
+//      --- # Implicit empty plain scalars do not produce tokens.
+//      --- a plain scalar
+//      --- 'a single-quoted scalar'
+//      --- "a double-quoted scalar"
+//      --- |-
+//        a literal scalar
+//      --- >-
+//        a folded
+//        scalar
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      DOCUMENT-START
+//      DOCUMENT-START
+//      SCALAR("a plain scalar",plain)
+//      DOCUMENT-START
+//      SCALAR("a single-quoted scalar",single-quoted)
+//      DOCUMENT-START
+//      SCALAR("a double-quoted scalar",double-quoted)
+//      DOCUMENT-START
+//      SCALAR("a literal scalar",literal)
+//      DOCUMENT-START
+//      SCALAR("a folded scalar",folded)
+//      STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+//      FLOW-SEQUENCE-START
+//      FLOW-SEQUENCE-END
+//      FLOW-MAPPING-START
+//      FLOW-MAPPING-END
+//      FLOW-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// respectively. FLOW-ENTRY represents the ',' indicator. Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+//      [item 1, item 2, item 3]
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      FLOW-SEQUENCE-START
+//      SCALAR("item 1",plain)
+//      FLOW-ENTRY
+//      SCALAR("item 2",plain)
+//      FLOW-ENTRY
+//      SCALAR("item 3",plain)
+//      FLOW-SEQUENCE-END
+//      STREAM-END
+//
+// 2. A flow mapping:
+//
+//      {
+//          a simple key: a value,  # Note that the KEY token is produced.
+//          ? a complex key: another value,
+//      }
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      FLOW-MAPPING-START
+//      KEY
+//      SCALAR("a simple key",plain)
+//      VALUE
+//      SCALAR("a value",plain)
+//      FLOW-ENTRY
+//      KEY
+//      SCALAR("a complex key",plain)
+//      VALUE
+//      SCALAR("another value",plain)
+//      FLOW-ENTRY
+//      FLOW-MAPPING-END
+//      STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+//      BLOCK-SEQUENCE-START
+//      BLOCK-MAPPING-START
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote the
+// indentation increase that precedes a block collection (cf. the INDENT token
+// in Python). The token BLOCK-END denotes the indentation decrease that ends a
+// block collection (cf. the DEDENT token in Python). However, YAML has some
+// syntactic peculiarities that make detecting these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' respectively.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1.
Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
+// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. 
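+// The context names the enclosing construct being scanned, and the problem
+// describes what went wrong at the current mark.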
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
+	parser.error = yaml_SCANNER_ERROR
+	parser.context = context
+	parser.context_mark = context_mark
+	parser.problem = problem
+	parser.problem_mark = parser.mark
+	return false
+}
+
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+	context := "while parsing a tag"
+	if directive {
+		context = "while parsing a %TAG directive"
+	}
+	return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
+}
+
+func trace(args ...interface{}) func() {
+	pargs := append([]interface{}{"+++"}, args...)
+	fmt.Println(pargs...)
+	pargs = append([]interface{}{"---"}, args...)
+	return func() { fmt.Println(pargs...) }
+}
+
+// Ensure that the tokens queue contains at least one token which can be
+// returned to the Parser.
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+	// While we need more tokens to fetch, do it.
+	for {
+		// Check if we really need to fetch more tokens.
+		need_more_tokens := false
+
+		if parser.tokens_head == len(parser.tokens) {
+			// Queue is empty.
+			need_more_tokens = true
+		} else {
+			// Check if any potential simple key may occupy the head position.
+			if !yaml_parser_stale_simple_keys(parser) {
+				return false
+			}
+
+			for i := range parser.simple_keys {
+				simple_key := &parser.simple_keys[i]
+				if simple_key.possible && simple_key.token_number == parser.tokens_parsed {
+					need_more_tokens = true
+					break
+				}
+			}
+		}
+
+		// We are finished.
+		if !need_more_tokens {
+			break
+		}
+		// Fetch the next token.
+		if !yaml_parser_fetch_next_token(parser) {
+			return false
+		}
+	}
+
+	parser.token_available = true
+	return true
+}
+
+// The dispatcher for token fetchers.
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
+	// Ensure that the buffer is initialized.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	// Check if we just started scanning. Fetch STREAM-START then.
+	if !parser.stream_start_produced {
+		return yaml_parser_fetch_stream_start(parser)
+	}
+
+	// Eat whitespace and comments until we reach the next token.
+	if !yaml_parser_scan_to_next_token(parser) {
+		return false
+	}
+
+	// Remove obsolete potential simple keys.
+	if !yaml_parser_stale_simple_keys(parser) {
+		return false
+	}
+
+	// Check the indentation level against the current column.
+	if !yaml_parser_unroll_indent(parser, parser.mark.column) {
+		return false
+	}
+
+	// Ensure that the buffer contains at least 4 characters. 4 is the length
+	// of the longest indicators ('--- ' and '... ').
+	if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+		return false
+	}
+
+	// Is it the end of the stream?
+	if is_z(parser.buffer, parser.buffer_pos) {
+		return yaml_parser_fetch_stream_end(parser)
+	}
+
+	// Is it a directive?
+	if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
+		return yaml_parser_fetch_directive(parser)
+	}
+
+	buf := parser.buffer
+	pos := parser.buffer_pos
+
+	// Is it the document start indicator?
+	if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
+		return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
+	}
+
+	// Is it the document end indicator?
+	if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.'
&& is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? + if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' 
|| parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +// Check the list of potential simple keys and remove the positions that +// cannot contain simple keys anymore. +func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { + // Check for a potential simple key for each flow level. + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + + // The specification requires that a simple key + // + // - is limited to a single line, + // - is shorter than 1024 characters. + if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { + + // Check if the potential simple key to be removed is required. + if simple_key.required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + } + } + return true +} + +// Check if a simple key may start at the current position and add it if +// needed. +func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // A simple key is required only when it is the first token in the current + // line. Therefore it is always allowed. But we add a check anyway. + if required && !parser.simple_key_allowed { + panic("should not happen") + } + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + } + simple_key.mark = parser.mark + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. 
+		if parser.simple_keys[i].required {
+			return yaml_parser_set_scanner_error(parser,
+				"while scanning a simple key", parser.simple_keys[i].mark,
+				"could not find expected ':'")
+		}
+	}
+	// Remove the key from the stack.
+	parser.simple_keys[i].possible = false
+	return true
+}
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+	// Reset the simple key on the next level.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+	// Increase the flow level.
+	parser.flow_level++
+	return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+	if parser.flow_level > 0 {
+		parser.flow_level--
+		parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
+	}
+	return true
+}
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	if parser.indent < column {
+		// Push the current indentation level to the stack and set the new
+		// indentation level.
+		parser.indents = append(parser.indents, parser.indent)
+		parser.indent = column
+
+		// Create a token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        typ,
+			start_mark: mark,
+			end_mark:   mark,
+		}
+		if number > -1 {
+			number -= parser.tokens_parsed
+		}
+		yaml_insert_token(parser, number, &token)
+	}
+	return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less than or equal to the column. For each indentation level,
+// append the BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	// Loop through the indentation levels in the stack.
+	for parser.indent > column {
+		// Create a token and append it to the queue.
+		token := yaml_token_t{
+			typ:        yaml_BLOCK_END_TOKEN,
+			start_mark: parser.mark,
+			end_mark:   parser.mark,
+		}
+		yaml_insert_token(parser, -1, &token)
+
+		// Pop the indentation level.
+		parser.indent = parser.indents[len(parser.indents)-1]
+		parser.indents = parser.indents[:len(parser.indents)-1]
+	}
+	return true
+}
+
+// Initialize the scanner and produce the STREAM-START token.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+	// Set the initial indentation.
+	parser.indent = -1
+
+	// Initialize the simple key stack.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+	// A simple key is allowed at the beginning of the stream.
+	parser.simple_key_allowed = true
+
+	// We have started.
+	parser.stream_start_produced = true
+
+	// Create the STREAM-START token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_STREAM_START_TOKEN,
+		start_mark: parser.mark,
+		end_mark:   parser.mark,
+		encoding:   parser.encoding,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
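// ---- Editorial sketch: illustration only, not part of this patch ----
// A toy model of the indent bookkeeping above. yaml_parser_roll_indent pushes
// a level (and lets the caller emit a BLOCK-*-START token) only when the new
// column is deeper; yaml_parser_unroll_indent pops levels, producing one
// BLOCK-END per level left behind. None of these names exist upstream.
type indentStack struct {
	indent  int   // current indentation column; -1 before the stream starts
	indents []int // enclosing indentation levels
}

func (s *indentStack) roll(column int) bool {
	if s.indent >= column {
		return false // same or shallower column: no new block level
	}
	s.indents = append(s.indents, s.indent)
	s.indent = column
	return true // caller would emit BLOCK-SEQUENCE-START or BLOCK-MAPPING-START
}

func (s *indentStack) unroll(column int) (blockEnds int) {
	for s.indent > column { // each popped level yields one BLOCK-END token
		s.indent = s.indents[len(s.indents)-1]
		s.indents = s.indents[:len(s.indents)-1]
		blockEnds++
	}
	return blockEnds
}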
+
+// Produce the STREAM-END token and shut down the scanner.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+	// Force new line.
+	if parser.mark.column != 0 {
+		parser.mark.column = 0
+		parser.mark.line++
+	}
+
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Create the STREAM-END token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_STREAM_END_TOKEN,
+		start_mark: parser.mark,
+		end_mark:   parser.mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
+	token := yaml_token_t{}
+	if !yaml_parser_scan_directive(parser, &token) {
+		return false
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+	start_mark := parser.mark
+
+	skip(parser)
+	skip(parser)
+	skip(parser)
+
+	end_mark := parser.mark
+
+	// Create the DOCUMENT-START or DOCUMENT-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// The indicators '[' and '{' may start a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// Increase the flow level.
+	if !yaml_parser_increase_flow_level(parser) {
+		return false
+	}
+
+	// A simple key may follow the indicators '[' and '{'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset any potential simple key on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Decrease the flow level.
+	if !yaml_parser_decrease_flow_level(parser) {
+		return false
+	}
+
+	// No simple keys after the indicators ']' and '}'.
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
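// ---- Editorial example: a standalone program, not part of this patch ----
// Errors recorded by yaml_parser_set_scanner_error surface through the public
// API. Both messages quoted below appear in this file; the import path is an
// assumption (the package is vendored here from github.com/go-yaml/yaml and
// is canonically gopkg.in/yaml.v2), and the exact output is what the scanner
// logic suggests rather than a verified transcript.
package main

import (
	"fmt"
	"strings"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	var v interface{}

	// '@' is a reserved indicator that cannot start a plain scalar, so the
	// dispatcher should report "found character that cannot start any token".
	fmt.Println(yaml.Unmarshal([]byte("a: @oops"), &v))

	// A simple key must fit on one line and within 1024 characters; a longer
	// key should trip "could not find expected ':'" via the stale-key check.
	long := strings.Repeat("k", 2000) + ": v"
	fmt.Println(yaml.Unmarshal([]byte(long), &v))
}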
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after ','.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_FLOW_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+	// Check if the scanner is in the block context.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new entry.
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"block sequence entries are not allowed in this context")
+		}
+		// Add the BLOCK-SEQUENCE-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+			return false
+		}
+	} else {
+		// It is an error for the '-' indicator to occur in the flow context,
+		// but we let the Parser detect and report about it because the Parser
+		// is able to point to the context.
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '-'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the BLOCK-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_BLOCK_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+	// In the block context, additional checks are required.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new key (not necessarily simple).
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"mapping keys are not allowed in this context")
+		}
+		// Add the BLOCK-MAPPING-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+			return false
+		}
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '?' in the block context.
+	parser.simple_key_allowed = parser.flow_level == 0
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the KEY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_KEY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+	// Have we found a simple key?
+	if simple_key.possible {
+		// Create the KEY token and insert it into the queue.
+ token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. 
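// [Editorial aside, not upstream code] The retroactive insertion performed by
// yaml_parser_fetch_value above is the heart of simple-key handling: for the
// document "a: 1" the scalar "a" is scanned first, and only when the ':'
// arrives are the KEY token and (via yaml_parser_roll_indent) the
// BLOCK-MAPPING-START token inserted back at the position recorded by
// yaml_parser_save_simple_key. The resulting token stream is:
//
//	STREAM-START, BLOCK-MAPPING-START, KEY, SCALAR("a"),
//	VALUE, SCALAR("1"), BLOCK-END, STREAM-END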
+ var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. 
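// [Editorial aside, not upstream code] The directives tokenized by
// yaml_parser_scan_directive look like:
//
//	%YAML 1.1
//	%TAG !e! tag:example.com,2002:
//	---
//	!e!widget {}
//
// The scanner only produces the tokens; version compatibility and handle
// registration are enforced later by the parser.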
+ *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. 
+// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' 
|| + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. 
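// [Editorial aside, not upstream code] The tag shapes handled by
// yaml_parser_scan_tag above:
//
//	!<tag:yaml.org,2002:str> x   # verbatim: scanned between '!<' and '>'
//	!!str x                      # handle "!!" with suffix "str"
//	!local x                     # handle "!" with suffix "local"
//
// plus the bare "!" special case the code comments describe, which swaps the
// handle and suffix so the token carries an empty handle and the suffix "!".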
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && !(s[0] == '!' && s[1] == 0) { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the tag is non-empty. + if len(s) == 0 { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. 
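// [Editorial aside, not upstream code] An example of the %-escape decoding
// performed by yaml_parser_scan_uri_escapes below: in a tag such as
//
//	!<tag:example.com,2002:caf%C3%A9>
//
// the octets %C3 and %A9 decode to 0xC3 0xA9, the two-byte UTF-8 sequence
// for 'é'; width(0xC3) reports a length of 2, and 0xA9&0xC0 == 0x80 passes
// the trailing-octet check.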
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. 
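// [Editorial aside, not upstream code] The block scalar header parsed above
// combines a chomping indicator with an optional indentation indicator:
//
//	s: |-    # literal, strip the final line break (chomping = -1)
//	s: |+    # literal, keep any trailing blank lines (chomping = +1)
//	s: |2    # literal, content indented 2 columns past the parent level
//
// With no chomping indicator (chomping = 0) a single trailing line break is
// kept, and with no indentation indicator the level is detected from the
// first non-empty content line.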
+ if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. 
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+		if parser.mark.column > max_indent {
+			max_indent = parser.mark.column
+		}
+
+		// Check for a tab character messing the indentation.
+		if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+			return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+				start_mark, "found a tab character where an indentation space is expected")
+		}
+
+		// Have we found a non-empty line?
+		if !is_break(parser.buffer, parser.buffer_pos) {
+			break
+		}
+
+		// Consume the line break.
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		// [Go] Should really be returning breaks instead.
+		*breaks = read_line(parser, *breaks)
+		*end_mark = parser.mark
+	}
+
+	// Determine the indentation level if needed.
+	if *indent == 0 {
+		*indent = max_indent
+		if *indent < parser.indent+1 {
+			*indent = parser.indent + 1
+		}
+		if *indent < 1 {
+			*indent = 1
+		}
+	}
+	return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+	// Eat the left quote.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Consume the content of the quoted scalar.
+	var s, leading_break, trailing_breaks, whitespaces []byte
+	for {
+		// Check that there are no document indicators at the beginning of the line.
+		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+			return false
+		}
+
+		if parser.mark.column == 0 &&
+			((parser.buffer[parser.buffer_pos+0] == '-' &&
+				parser.buffer[parser.buffer_pos+1] == '-' &&
+				parser.buffer[parser.buffer_pos+2] == '-') ||
+				(parser.buffer[parser.buffer_pos+0] == '.' &&
+					parser.buffer[parser.buffer_pos+1] == '.' &&
+					parser.buffer[parser.buffer_pos+2] == '.')) &&
+			is_blankz(parser.buffer, parser.buffer_pos+3) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected document indicator")
+			return false
+		}
+
+		// Check for EOF.
+		if is_z(parser.buffer, parser.buffer_pos) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected end of stream")
+			return false
+		}
+
+		// Consume non-blank characters.
+		leading_blanks := false
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+				// It is an escaped single quote.
+				s = append(s, '\'')
+				skip(parser)
+				skip(parser)
+
+			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
+				// It is a right single quote.
+				break
+			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
+				// It is a right double quote.
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+				// It is an escaped line break.
+				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+					return false
+				}
+				skip(parser)
+				skip_line(parser)
+				leading_blanks = true
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+				// It is an escape sequence.
+				code_length := 0
+
+				// Check the escape character.
+				switch parser.buffer[parser.buffer_pos+1] {
+				case '0':
+					s = append(s, 0)
+				case 'a':
+					s = append(s, '\x07')
+				case 'b':
+					s = append(s, '\x08')
+				case 't', '\t':
+					s = append(s, '\x09')
+				case 'n':
+					s = append(s, '\x0A')
+				case 'v':
+					s = append(s, '\x0B')
+				case 'f':
+					s = append(s, '\x0C')
+				case 'r':
+					s = append(s, '\x0D')
+				case 'e':
+					s = append(s, '\x1B')
+				case ' ':
+					s = append(s, '\x20')
+				case '"':
+					s = append(s, '"')
+				case '\'':
+					s = append(s, '\'')
+				case '\\':
+					s = append(s, '\\')
+				case 'N': // NEL (#x85)
+					s = append(s, '\xC2')
+					s = append(s, '\x85')
+				case '_': // #xA0
+					s = append(s, '\xC2')
+					s = append(s, '\xA0')
+				case 'L': // LS (#x2028)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA8')
+				case 'P': // PS (#x2029)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA9')
+				case 'x':
+					code_length = 2
+				case 'u':
+					code_length = 4
+				case 'U':
+					code_length = 8
+				default:
+					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+						start_mark, "found unknown escape character")
+					return false
+				}
+
+				skip(parser)
+				skip(parser)
+
+				// Consume an arbitrary escape code.
+				if code_length > 0 {
+					var value int
+
+					// Scan the character value.
+					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+						return false
+					}
+					for k := 0; k < code_length; k++ {
+						if !is_hex(parser.buffer, parser.buffer_pos+k) {
+							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+								start_mark, "did not find expected hexadecimal number")
+							return false
+						}
+						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+					}
+
+					// Check the value and write the character.
+					if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+						yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+							start_mark, "found invalid Unicode character escape code")
+						return false
+					}
+					if value <= 0x7F {
+						s = append(s, byte(value))
+					} else if value <= 0x7FF {
+						s = append(s, byte(0xC0+(value>>6)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else if value <= 0xFFFF {
+						s = append(s, byte(0xE0+(value>>12)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else {
+						s = append(s, byte(0xF0+(value>>18)))
+						s = append(s, byte(0x80+((value>>12)&0x3F)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					}
+
+					// Advance the pointer.
+					for k := 0; k < code_length; k++ {
+						skip(parser)
+					}
+				}
+			} else {
+				// It is a non-escaped non-blank character.
+				s = read(parser, s)
+			}
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+		}
+
+		// Check if we are at the end of the scalar.
+		if single {
+			if parser.buffer[parser.buffer_pos] == '\'' {
+				break
+			}
+		} else {
+			if parser.buffer[parser.buffer_pos] == '"' {
+				break
+			}
+		}
+
+		// Consume blank characters.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+			if is_blank(parser.buffer, parser.buffer_pos) {
+				// Consume a space or a tab character.
+				if !leading_blanks {
+					whitespaces = read(parser, whitespaces)
+				} else {
+					skip(parser)
+				}
+			} else {
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+
+				// Check if it is a first line break.
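// [Editorial aside, not upstream code] Given the escape handling above, a
// document such as
//
//	d: "fold\u2028ed \\ \x41"
//
// decodes to "fold<LS>ed \ A": \u2028 takes the code_length == 4 branch and
// is re-encoded as the three UTF-8 bytes 0xE2 0x80 0xA8, the same result the
// standard library's utf8.EncodeRune would produce.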
+ if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13". + if parser.flow_level > 0 && + parser.buffer[parser.buffer_pos] == ':' && + !is_blankz(parser.buffer, parser.buffer_pos+1) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found unexpected ':'") + return false + } + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) 
+					whitespaces = whitespaces[:0]
+				}
+			}
+
+			// Copy the character.
+			s = read(parser, s)
+
+			end_mark = parser.mark
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+		}
+
+		// Is it the end?
+		if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+			break
+		}
+
+		// Consume blank characters.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+			if is_blank(parser.buffer, parser.buffer_pos) {
+
+				// Check for a tab character that abuses indentation.
+				if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+					yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+						start_mark, "found a tab character that violates indentation")
+					return false
+				}
+
+				// Consume a space or a tab character.
+				if !leading_blanks {
+					whitespaces = read(parser, whitespaces)
+				} else {
+					skip(parser)
+				}
+			} else {
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+
+				// Check if it is a first line break.
+				if !leading_blanks {
+					whitespaces = whitespaces[:0]
+					leading_break = read_line(parser, leading_break)
+					leading_blanks = true
+				} else {
+					trailing_breaks = read_line(parser, trailing_breaks)
+				}
+			}
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Check indentation level.
+		if parser.flow_level == 0 && parser.mark.column < indent {
+			break
+		}
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_PLAIN_SCALAR_STYLE,
+	}
+
+	// Note that we change the 'simple_key_allowed' flag.
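// [Editorial aside, not upstream code] The break/whitespace bookkeeping above
// is what gives plain scalars their folding behavior: in
//
//	a: one
//	   two
//
// the single line break between "one" and "two" is folded into a space, so
// the value decodes as "one two". The indentation check at the bottom of the
// loop is what ends the scalar once a line falls back to the parent level.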
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff --git a/vendor/github.com/go-yaml/yaml/sorter.go b/vendor/github.com/go-yaml/yaml/sorter.go new file mode 100644 index 0000000000..5958822f9c --- /dev/null +++ b/vendor/github.com/go-yaml/yaml/sorter.go @@ -0,0 +1,104 @@ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + return bl + } + var ai, bi int + var an, bn int64 + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/github.com/go-yaml/yaml/suite_test.go b/vendor/github.com/go-yaml/yaml/suite_test.go new file mode 100644 index 0000000000..c5cf1ed4f6 --- /dev/null +++ b/vendor/github.com/go-yaml/yaml/suite_test.go @@ -0,0 +1,12 @@ +package yaml_test + +import ( + . "gopkg.in/check.v1" + "testing" +) + +func Test(t *testing.T) { TestingT(t) } + +type S struct{} + +var _ = Suite(&S{}) diff --git a/vendor/github.com/go-yaml/yaml/writerc.go b/vendor/github.com/go-yaml/yaml/writerc.go new file mode 100644 index 0000000000..190362f25d --- /dev/null +++ b/vendor/github.com/go-yaml/yaml/writerc.go @@ -0,0 +1,89 @@ +package yaml + +// Set the writer error and return false. 
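// [Editorial aside, not upstream code] yaml_emitter_flush below recodes the
// UTF-8 output buffer into UTF-16 when that encoding was requested. For code
// points above 0xFFFF it emits a surrogate pair; the byte math matches the
// standard library, e.g. utf16.EncodeRune('\U0001F600') yields the pair
// 0xD83D, 0xDE00.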
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + // If the output encoding is UTF-8, we don't need to recode the buffer. + if emitter.encoding == yaml_UTF8_ENCODING { + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true + } + + // Recode the buffer into the raw buffer. + var low, high int + if emitter.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + high, low = 1, 0 + } + + pos := 0 + for pos < emitter.buffer_pos { + // See the "reader.c" code for more details on UTF-8 encoding. Note + // that we assume that the buffer contains a valid UTF-8 sequence. + + // Read the next UTF-8 character. + octet := emitter.buffer[pos] + + var w int + var value rune + switch { + case octet&0x80 == 0x00: + w, value = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, value = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, value = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, value = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = emitter.buffer[pos+k] + value = (value << 6) + (rune(octet) & 0x3F) + } + pos += w + + // Write the character. + if value < 0x10000 { + var b [2]byte + b[high] = byte(value >> 8) + b[low] = byte(value & 0xFF) + emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1]) + } else { + // Write the character using a surrogate pair (check "reader.c"). + var b [4]byte + value -= 0x10000 + b[high] = byte(0xD8 + (value >> 18)) + b[low] = byte((value >> 10) & 0xFF) + b[high+2] = byte(0xDC + ((value >> 8) & 0xFF)) + b[low+2] = byte(value & 0xFF) + emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3]) + } + } + + // Write the raw buffer. + if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + emitter.raw_buffer = emitter.raw_buffer[:0] + return true +} diff --git a/vendor/github.com/go-yaml/yaml/yaml.go b/vendor/github.com/go-yaml/yaml/yaml.go new file mode 100644 index 0000000000..36d6b883a6 --- /dev/null +++ b/vendor/github.com/go-yaml/yaml/yaml.go @@ -0,0 +1,346 @@ +// Package yaml implements YAML support for the Go language. +// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/go-yaml/yaml +// +package yaml + +import ( + "errors" + "fmt" + "reflect" + "strings" + "sync" +) + +// MapSlice encodes and decodes as a YAML map. +// The order of keys is preserved when encoding and decoding. +type MapSlice []MapItem + +// MapItem is an item in a MapSlice. +type MapItem struct { + Key, Value interface{} +} + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a YAML document. The UnmarshalYAML +// method receives a function that may be called to unmarshal the original +// YAML value into a field or variable. It is safe to call the unmarshal +// function parameter more than once if necessary. 
+type Unmarshaler interface { + UnmarshalYAML(unmarshal func(interface{}) error) error +} + +// The Marshaler interface may be implemented by types to customize their +// behavior when being marshaled into a YAML document. The returned value +// is marshaled in place of the original value implementing Marshaler. +// +// If an error is returned by MarshalYAML, the marshaling procedure stops +// and returns with the provided error. +type Marshaler interface { + MarshalYAML() (interface{}, error) +} + +// Unmarshal decodes the first document found within the in byte slice +// and assigns decoded values into the out value. +// +// Maps and pointers (to a struct, string, int, etc) are accepted as out +// values. If an internal pointer within a struct is not initialized, +// the yaml package will initialize it if necessary for unmarshalling +// the provided data. The out parameter must not be nil. +// +// The type of the decoded values should be compatible with the respective +// values in out. If one or more values cannot be decoded due to type +// mismatches, decoding continues partially until the end of the YAML +// content, and a *yaml.TypeError is returned with details for all +// missed values. +// +// Struct fields are only unmarshalled if they are exported (have an +// upper case first letter), and are unmarshalled using the field name +// lowercased as the default key. Custom keys may be defined via the +// "yaml" name in the field tag: the content preceding the first comma +// is used as the key, and the following comma-separated options are +// used to tweak the marshalling process (see Marshal). +// Conflicting names result in a runtime error. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// var t T +// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) +// +// See the documentation of Marshal for the format of tags and a list of +// supported tag options. +// +func Unmarshal(in []byte, out interface{}) (err error) { + defer handleErr(&err) + d := newDecoder() + p := newParser(in) + defer p.destroy() + node := p.parse() + if node != nil { + v := reflect.ValueOf(out) + if v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + d.unmarshal(node, v) + } + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +// Marshal serializes the value provided into a YAML document. The structure +// of the generated document will reflect the structure of the value itself. +// Maps and pointers (to struct, string, int, etc) are accepted as the in value. +// +// Struct fields are only marshalled if they are exported (have an upper case +// first letter), and are marshalled using the field name lowercased as the +// default key. Custom keys may be defined via the "yaml" name in the field +// tag: the content preceding the first comma is used as the key, and the +// following comma-separated options are used to tweak the marshalling process. +// Conflicting names result in a runtime error. +// +// The field tag format accepted is: +// +// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)` +// +// The following flags are currently supported: +// +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Does not apply to zero valued structs. +// +// flow Marshal using a flow style (useful for structs, +// sequences and maps). +// +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct.
For maps, keys must +// not conflict with the yaml keys of other struct fields. +// +// In addition, if the key is "-", the field is ignored. +// +// For example: +// +// type T struct { +// F int "a,omitempty" +// B int +// } +// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" +// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n" +// +func Marshal(in interface{}) (out []byte, err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshal("", reflect.ValueOf(in)) + e.finish() + out = e.out + return +} + +func handleErr(err *error) { + if v := recover(); v != nil { + if e, ok := v.(yamlError); ok { + *err = e.err + } else { + panic(v) + } + } +} + +type yamlError struct { + err error +} + +func fail(err error) { + panic(yamlError{err}) +} + +func failf(format string, args ...interface{}) { + panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) +} + +// A TypeError is returned by Unmarshal when one or more fields in +// the YAML document cannot be properly decoded into the requested +// types. When this error is returned, the value is still +// unmarshaled partially. +type TypeError struct { + Errors []string +} + +func (e *TypeError) Error() string { + return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + + // Inline holds the field index if the field is part of an inlined struct.
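[Editor's aside] The omitempty, flow, inline, and "-" tag options documented above combine as follows; a hedged sketch under the canonical gopkg.in/yaml.v2 import path, with illustrative type and field names (the fieldInfo declaration continues just below):

    package main

    import (
        "fmt"

        yaml "gopkg.in/yaml.v2"
    )

    type Meta struct {
        Owner string `yaml:"owner"`
        Team  string `yaml:"team"`
    }

    type Config struct {
        Name  string            `yaml:"name"`
        Tags  []string          `yaml:"tags,flow"`      // emitted as [a, b]
        Note  string            `yaml:"note,omitempty"` // dropped when empty
        Meta  Meta              `yaml:",inline"`        // fields hoisted to this level
        Extra map[string]string `yaml:"-"`              // ignored entirely
    }

    func main() {
        out, _ := yaml.Marshal(Config{
            Name: "dep",
            Tags: []string{"go", "vendoring"},
            Meta: Meta{Owner: "golang", Team: "tools"},
        })
        fmt.Print(string(out))
        // name: dep
        // tags: [go, vendoring]
        // owner: golang
        // team: tools
    }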
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) 
+ } + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{fieldsMap, fieldsList, inlineMap} + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/github.com/go-yaml/yaml/yamlh.go b/vendor/github.com/go-yaml/yaml/yamlh.go new file mode 100644 index 0000000000..d60a6b6b00 --- /dev/null +++ b/vendor/github.com/go-yaml/yaml/yamlh.go @@ -0,0 +1,716 @@ +package yaml + +import ( + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. 
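[Editor's aside] The isZero helper above is the predicate behind the omitempty flag: a field is skipped only when isZero reports its value as empty. A hedged sketch of which values that covers, assuming the canonical gopkg.in/yaml.v2 import path (the yaml_mark_t struct closes just below):

    package main

    import (
        "fmt"

        yaml "gopkg.in/yaml.v2"
    )

    type Doc struct {
        Count int               `yaml:"count,omitempty"` // 0 is omitted
        Name  string            `yaml:"name,omitempty"`  // "" is omitted
        Tags  []string          `yaml:"tags,omitempty"`  // empty slice is omitted
        Meta  map[string]string `yaml:"meta,omitempty"`  // empty map is omitted
        Live  bool              `yaml:"live"`            // no omitempty: always emitted
    }

    func main() {
        out, _ := yaml.Marshal(Doc{})
        fmt.Print(string(out)) // only "live: false" survives
    }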
+} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota + + yaml_PLAIN_SCALAR_STYLE // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. + yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. +const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. + yaml_STREAM_END_TOKEN // A STREAM-END token. + + yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. + yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. + yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. + yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. + + yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. + yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token. + yaml_BLOCK_END_TOKEN // A BLOCK-END token. + + yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. + yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. + yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. + yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. + + yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. + yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. + yaml_KEY_TOKEN // A KEY token. + yaml_VALUE_TOKEN // A VALUE token. + + yaml_ALIAS_TOKEN // An ALIAS token. + yaml_ANCHOR_TOKEN // An ANCHOR token. + yaml_TAG_TOKEN // A TAG token. + yaml_SCALAR_TOKEN // A SCALAR token.
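[Editor's aside] The token types above are what the scanner emits, in libyaml's usual order. An approximate, hand-worked illustration of the stream for a small document, based on standard libyaml behavior (the const block closes just below):

    Input document:

        a: 1
        b: [x, y]

    Approximate token stream (names as defined above):

        yaml_STREAM_START_TOKEN
        yaml_BLOCK_MAPPING_START_TOKEN
        yaml_KEY_TOKEN,   yaml_SCALAR_TOKEN("a")
        yaml_VALUE_TOKEN, yaml_SCALAR_TOKEN("1")
        yaml_KEY_TOKEN,   yaml_SCALAR_TOKEN("b")
        yaml_VALUE_TOKEN
        yaml_FLOW_SEQUENCE_START_TOKEN
        yaml_SCALAR_TOKEN("x"), yaml_FLOW_ENTRY_TOKEN, yaml_SCALAR_TOKEN("y")
        yaml_FLOW_SEQUENCE_END_TOKEN
        yaml_BLOCK_END_TOKEN
        yaml_STREAM_END_TOKEN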
+) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. +) + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). 
+ anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. 
+type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write no more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. +} + +// The states of the parser. +type yaml_parser_state_t int + +const ( + yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota + + yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. + yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. + yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. + yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. + yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. + yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. + yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. + yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. + yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. + yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. + yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry. + yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
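[Editor's aside] The yaml_read_handler_t prototype documented above maps directly onto io.Reader: report the byte count and signal EOF through the returned error. A hedged sketch of a conforming handler; readerHandler is a hypothetical name, not a function in this package (the parser-state const block closes just below):

    package yaml

    import "io"

    // readerHandler (hypothetical) adapts an io.Reader to the read-handler
    // contract: fill up to len(buffer) bytes and return the count read.
    func readerHandler(r io.Reader) yaml_read_handler_t {
        return func(parser *yaml_parser_t, buffer []byte) (int, error) {
            return r.Read(buffer)
        }
    }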
+ yaml_PARSE_END_STATE // Expect nothing. +) + +func (ps yaml_parser_state_t) String() string { + switch ps { + case yaml_PARSE_STREAM_START_STATE: + return "yaml_PARSE_STREAM_START_STATE" + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_START_STATE: + return "yaml_PARSE_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return "yaml_PARSE_DOCUMENT_CONTENT_STATE" + case yaml_PARSE_DOCUMENT_END_STATE: + return "yaml_PARSE_DOCUMENT_END_STATE" + case yaml_PARSE_BLOCK_NODE_STATE: + return "yaml_PARSE_BLOCK_NODE_STATE" + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" + case yaml_PARSE_FLOW_NODE_STATE: + return "yaml_PARSE_FLOW_NODE_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_file io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The input encoding.
+ + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. 
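[Editor's aside] The yaml_write_handler_t prototype documented above is the emitter-side mirror of the read handler: flush the buffered bytes and surface any write error. A hedged sketch; writerHandler is a hypothetical name, not a function in this package (the yaml_emitter_t declaration follows just below):

    package yaml

    import "io"

    // writerHandler (hypothetical) adapts an io.Writer to the write-handler
    // contract: write the emitter's buffered bytes, returning any error.
    func writerHandler(w io.Writer) yaml_write_handler_t {
        return func(emitter *yaml_emitter_t, buffer []byte) error {
            _, err := w.Write(buffer)
            return err
        }
    }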
+type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_file io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? + best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. + tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. + scalar_data struct { + value []byte // The scalar value. + multiline bool // Does the scalar contain line breaks? + flow_plain_allowed bool // Can the scalar be expressed in the flow plain style? + block_plain_allowed bool // Can the scalar be expressed in the block plain style? + single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? + block_allowed bool // Can the scalar be expressed in the literal or folded styles? + style yaml_scalar_style_t // The output style. + } + + // Dumper stuff + + opened bool // If the stream was already opened? + closed bool // If the stream was already closed? + + // The information associated with the document nodes. + anchors *struct { + references int // The number of references. + anchor int // The anchor id. + serialized bool // If the node has been emitted? + } + + last_anchor_id int // The last assigned anchor id. + + document *yaml_document_t // The currently emitted document. +} diff --git a/vendor/github.com/go-yaml/yaml/yamlprivateh.go b/vendor/github.com/go-yaml/yaml/yamlprivateh.go new file mode 100644 index 0000000000..8110ce3c37 --- /dev/null +++ b/vendor/github.com/go-yaml/yaml/yamlprivateh.go @@ -0,0 +1,173 @@ +package yaml + +const ( + // The size of the input raw buffer. + input_raw_buffer_size = 512 + + // The size of the input buffer. + // It should be possible to decode the whole raw buffer.
+ input_buffer_size = input_raw_buffer_size * 3 + + // The size of the output buffer. + output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. + output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. + initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'. +func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. 
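[Editor's aside] The classifiers above hard-code UTF-8 lead-byte arithmetic; the same masks appear in width() at the end of this file. A standalone cross-check against the standard library, where leadWidth is a hypothetical mirror of width(), not the vendored function (is_breakz follows just below):

    package main

    import (
        "fmt"
        "unicode/utf8"
    )

    // leadWidth mirrors the lead-byte masks used by the vendored width().
    func leadWidth(b byte) int {
        switch {
        case b&0x80 == 0x00:
            return 1
        case b&0xE0 == 0xC0:
            return 2
        case b&0xF0 == 0xE0:
            return 3
        case b&0xF8 == 0xF0:
            return 4
        }
        return 0 // continuation byte or invalid lead byte
    }

    func main() {
        for _, r := range []rune{'a', 'é', '€', '😀'} {
            buf := make([]byte, 4)
            n := utf8.EncodeRune(buf, r)
            fmt.Println(string(r), leadWidth(buf[0]) == n) // all true
        }
    }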
+func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/github.com/pelletier/go-toml/marshal_test.go b/vendor/github.com/pelletier/go-toml/marshal_test.go index e2db67fce5..891222e9b1 100644 --- a/vendor/github.com/pelletier/go-toml/marshal_test.go +++ b/vendor/github.com/pelletier/go-toml/marshal_test.go @@ -145,8 +145,8 @@ var docData = testDoc{ Second: &subdoc, }, SubDocList: []testSubDoc{ - {"List.First", 0}, - {"List.Second", 0}, + testSubDoc{"List.First", 0}, + testSubDoc{"List.Second", 0}, }, SubDocPtrs: []*testSubDoc{&subdoc}, } @@ -504,7 +504,7 @@ var strPtr = []*string{&str1, &str2} var strPtr2 = []*[]*string{&strPtr} var nestedTestData = nestedMarshalTestStruct{ - String: [][]string{{"Five", "Six"}, {"One", "Two"}}, + String: [][]string{[]string{"Five", "Six"}, []string{"One", "Two"}}, StringPtr: &strPtr2, } diff --git a/vendor/github.com/pelletier/go-toml/tomltree_create_test.go b/vendor/github.com/pelletier/go-toml/tomltree_create_test.go index 7e683fd1ba..6c1496835e 100644 --- a/vendor/github.com/pelletier/go-toml/tomltree_create_test.go +++ b/vendor/github.com/pelletier/go-toml/tomltree_create_test.go @@ -1,9 +1,9 @@ package toml import ( - "strconv" "testing" "time" + "strconv" ) type customString string @@ -60,7 +60,7 @@ func TestTomlTreeCreateToTree(t *testing.T) { }, "array": []string{"a", "b", "c"}, "array_uint": []uint{uint(1), uint(2)}, - "array_table": []map[string]interface{}{{"sub_map": 52}}, + "array_table": []map[string]interface{}{map[string]interface{}{"sub_map": 52}}, "array_times": []time.Time{time.Now(), time.Now()}, "map_times": map[string]time.Time{"now": time.Now()}, "custom_string_map_key": map[customString]interface{}{customString("custom"): "custom"}, @@ -97,7 +97,7 @@ func TestTomlTreeCreateToTreeInvalidArrayMemberType(t *testing.T) { } func TestTomlTreeCreateToTreeInvalidTableGroupType(t *testing.T) { - _, err := TreeFromMap(map[string]interface{}{"foo": 
[]map[string]interface{}{{"hello": t}}}) + _, err := TreeFromMap(map[string]interface{}{"foo": []map[string]interface{}{map[string]interface{}{"hello": t}}}) expected := "cannot convert type *testing.T to TomlTree" if err.Error() != expected { t.Fatalf("expected error %s, got %s", expected, err.Error()) diff --git a/vendor/github.com/pelletier/go-toml/tomltree_write.go b/vendor/github.com/pelletier/go-toml/tomltree_write.go index 4df87eba0f..6a7fa17458 100644 --- a/vendor/github.com/pelletier/go-toml/tomltree_write.go +++ b/vendor/github.com/pelletier/go-toml/tomltree_write.go @@ -4,11 +4,11 @@ import ( "bytes" "fmt" "io" - "reflect" "sort" "strconv" "strings" "time" + "reflect" ) // encodes a string to a TOML-compliant string value