From 42ec5e2762d54db4c6678785d24b5a52690e8186 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 17 Nov 2023 21:17:54 +0000 Subject: [PATCH] Bump github.com/containers/common from 0.56.0 to 0.57.0 Bumps [github.com/containers/common](https://github.com/containers/common) from 0.56.0 to 0.57.0. - [Release notes](https://github.com/containers/common/releases) - [Commits](https://github.com/containers/common/compare/v0.56.0...v0.57.0) --- updated-dependencies: - dependency-name: github.com/containers/common dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 12 +- go.sum | 33 +- .../containerd/log/context_deprecated.go | 149 + .../github.com/containerd/log/.golangci.yml | 30 + vendor/github.com/containerd/log/LICENSE | 191 ++ vendor/github.com/containerd/log/README.md | 17 + .../{containerd => }/log/context.go | 0 .../common/internal/attributedstring/slice.go | 102 + .../containers/common/libimage/copier.go | 14 +- .../common/libimage/define/platform.go | 11 + .../containers/common/libimage/disk_usage.go | 3 + .../containers/common/libimage/events.go | 3 + .../containers/common/libimage/filters.go | 15 +- .../containers/common/libimage/history.go | 54 +- .../containers/common/libimage/image.go | 78 +- .../common/libimage/image_config.go | 3 + .../containers/common/libimage/image_tree.go | 3 + .../containers/common/libimage/import.go | 3 + .../containers/common/libimage/inspect.go | 3 + .../containers/common/libimage/layer_tree.go | 25 + .../containers/common/libimage/load.go | 3 + .../common/libimage/manifest_list.go | 5 + .../common/libimage/manifests/manifests.go | 45 +- .../containers/common/libimage/normalize.go | 3 + .../containers/common/libimage/oci.go | 3 + .../containers/common/libimage/platform.go | 99 +- .../common/libimage/platform/platform.go | 57 + .../containers/common/libimage/pull.go | 3 + .../containers/common/libimage/push.go | 3 + .../containers/common/libimage/runtime.go | 13 +- .../containers/common/libimage/save.go | 3 + .../containers/common/libimage/search.go | 3 + .../common/libnetwork/cni/network.go | 16 +- .../common/libnetwork/netavark/config.go | 5 +- .../common/libnetwork/netavark/network.go | 14 +- .../common/libnetwork/network/interface.go | 4 +- .../common/libnetwork/pasta/pasta.go | 2 +- .../libnetwork/slirp4netns/slirp4netns.go | 4 +- .../common/libnetwork/types/const.go | 1 + .../common/libnetwork/util/filters.go | 4 +- .../common/pkg/apparmor/apparmor_linux.go | 5 + .../containers/common/pkg/auth/auth.go | 87 +- .../containers/common/pkg/auth/cli.go | 32 +- .../containers/common/pkg/cgroups/blkio.go | 151 - .../containers/common/pkg/cgroups/cgroups.go | 614 ---- .../common/pkg/cgroups/cgroups_linux.go | 181 +- .../common/pkg/cgroups/cgroups_supported.go | 9 + .../common/pkg/cgroups/cgroups_unsupported.go | 8 + .../containers/common/pkg/cgroups/cpu.go | 91 - .../containers/common/pkg/cgroups/cpuset.go | 49 - .../containers/common/pkg/cgroups/memory.go | 69 - .../containers/common/pkg/cgroups/pids.go | 71 - .../containers/common/pkg/cgroups/utils.go | 179 -- .../common/pkg/cgroups/utils_linux.go | 4 +- .../containers/common/pkg/config/config.go | 186 +- .../common/pkg/config/config_local.go | 6 +- .../common/pkg/config/containers.conf | 29 +- .../common/pkg/config/containers.conf-freebsd | 29 +- .../common/pkg/config/db_backend.go | 17 +- .../containers/common/pkg/config/default.go | 161 +- 
.../containers/common/pkg/filters/filters.go | 55 +- .../common/pkg/password/password_supported.go | 57 + .../common/pkg/password/password_windows.go | 21 + .../common/pkg/subscriptions/subscriptions.go | 29 + .../containers/common/pkg/util/copy.go | 57 - .../containers/common/pkg/util/util.go | 122 - .../common/pkg/util/util_supported.go | 44 - .../common/pkg/util/util_windows.go | 15 - .../containers/common/pkg/version/version.go | 105 + .../containers/common/version/version.go | 2 +- .../github.com/fsnotify/fsnotify/.cirrus.yml | 13 + .../github.com/fsnotify/fsnotify/.gitignore | 1 + .../github.com/fsnotify/fsnotify/CHANGELOG.md | 83 +- vendor/github.com/fsnotify/fsnotify/README.md | 81 +- .../fsnotify/fsnotify/backend_fen.go | 552 +++- .../fsnotify/fsnotify/backend_inotify.go | 377 ++- .../fsnotify/fsnotify/backend_kqueue.go | 295 +- .../fsnotify/fsnotify/backend_other.go | 205 +- .../fsnotify/fsnotify/backend_windows.go | 247 +- .../github.com/fsnotify/fsnotify/fsnotify.go | 91 +- vendor/github.com/fsnotify/fsnotify/mkdoc.zsh | 125 +- .../runtime-tools/generate/generate.go | 2 +- .../generate/seccomp/parse_action.go | 6 +- .../generate/seccomp/seccomp_default.go | 2 - vendor/modules.txt | 23 +- vendor/sigs.k8s.io/yaml/LICENSE | 256 ++ vendor/sigs.k8s.io/yaml/OWNERS | 8 +- vendor/sigs.k8s.io/yaml/fields.go | 55 +- vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE | 201 ++ .../yaml/goyaml.v2/LICENSE.libyaml | 31 + vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE | 13 + vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS | 24 + vendor/sigs.k8s.io/yaml/goyaml.v2/README.md | 143 + vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go | 744 +++++ vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go | 815 +++++ vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go | 1685 ++++++++++ vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go | 390 +++ vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go | 1095 +++++++ vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go | 412 +++ vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go | 258 ++ vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go | 2711 +++++++++++++++++ vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go | 113 + vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go | 26 + vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go | 478 +++ vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go | 739 +++++ .../yaml/goyaml.v2/yamlprivateh.go | 173 ++ vendor/sigs.k8s.io/yaml/yaml.go | 145 +- vendor/sigs.k8s.io/yaml/yaml_go110.go | 17 + .../container-device-interface/LICENSE | 201 ++ .../pkg/parser/parser.go | 212 ++ 110 files changed, 14094 insertions(+), 2478 deletions(-) create mode 100644 vendor/github.com/containerd/containerd/log/context_deprecated.go create mode 100644 vendor/github.com/containerd/log/.golangci.yml create mode 100644 vendor/github.com/containerd/log/LICENSE create mode 100644 vendor/github.com/containerd/log/README.md rename vendor/github.com/containerd/{containerd => }/log/context.go (100%) create mode 100644 vendor/github.com/containers/common/internal/attributedstring/slice.go create mode 100644 vendor/github.com/containers/common/libimage/define/platform.go create mode 100644 vendor/github.com/containers/common/libimage/platform/platform.go delete mode 100644 vendor/github.com/containers/common/pkg/cgroups/blkio.go delete mode 100644 vendor/github.com/containers/common/pkg/cgroups/cgroups.go delete mode 100644 vendor/github.com/containers/common/pkg/cgroups/cpu.go delete mode 100644 vendor/github.com/containers/common/pkg/cgroups/cpuset.go delete mode 100644 vendor/github.com/containers/common/pkg/cgroups/memory.go delete mode 100644 
vendor/github.com/containers/common/pkg/cgroups/pids.go delete mode 100644 vendor/github.com/containers/common/pkg/cgroups/utils.go create mode 100644 vendor/github.com/containers/common/pkg/password/password_supported.go create mode 100644 vendor/github.com/containers/common/pkg/password/password_windows.go delete mode 100644 vendor/github.com/containers/common/pkg/util/copy.go create mode 100644 vendor/github.com/containers/common/pkg/version/version.go create mode 100644 vendor/github.com/fsnotify/fsnotify/.cirrus.yml create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/README.md create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go create mode 100644 vendor/tags.cncf.io/container-device-interface/LICENSE create mode 100644 vendor/tags.cncf.io/container-device-interface/pkg/parser/parser.go diff --git a/go.mod b/go.mod index 36431298..6800cfb6 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/containers/prometheus-podman-exporter go 1.18 require ( - github.com/containers/common v0.56.0 + github.com/containers/common v0.57.0 github.com/containers/image/v5 v5.29.0 github.com/containers/podman/v4 v4.7.2 github.com/go-kit/log v0.2.1 @@ -33,7 +33,8 @@ require ( github.com/chzyer/readline v1.5.1 // indirect github.com/container-orchestrated-devices/container-device-interface v0.6.1 // indirect github.com/containerd/cgroups/v3 v3.0.2 // indirect - github.com/containerd/containerd v1.7.6 // indirect + github.com/containerd/containerd v1.7.9 // indirect + github.com/containerd/log v0.1.0 // indirect github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect github.com/containernetworking/cni v1.1.2 // indirect github.com/containernetworking/plugins v1.3.0 // indirect @@ -57,7 +58,7 @@ require ( github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 // indirect github.com/docker/go-plugins-helpers v0.0.0-20211224144127-6eecb7beb651 // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fsouza/go-dockerclient v1.9.7 // indirect github.com/go-jose/go-jose/v3 v3.0.1 // indirect github.com/go-logfmt/logfmt v0.5.1 // indirect @@ -116,7 +117,7 @@ require ( github.com/opencontainers/image-spec v1.1.0-rc5 // indirect github.com/opencontainers/runc v1.1.10 // indirect github.com/opencontainers/runtime-spec v1.1.1-0.20230823135140-4fec88fd00a4 // indirect - github.com/opencontainers/runtime-tools v0.9.1-0.20230317050512-e931285f4b69 // indirect + 
github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc // indirect github.com/opencontainers/selinux v1.11.0 // indirect github.com/openshift/imagebuilder v1.2.5 // indirect github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect @@ -166,7 +167,8 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/kubernetes v1.28.2 // indirect - sigs.k8s.io/yaml v1.3.0 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect + tags.cncf.io/container-device-interface v0.6.2 // indirect ) replace github.com/opencontainers/runc => github.com/opencontainers/runc v1.1.1-0.20230904132852-a0466dd76f23 diff --git a/go.sum b/go.sum index 82fce89b..70e783fb 100644 --- a/go.sum +++ b/go.sum @@ -182,8 +182,8 @@ github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09Zvgq github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= -github.com/containerd/containerd v1.7.6 h1:oNAVsnhPoy4BTPQivLgTzI9Oleml9l/+eYIDYXRCYo8= -github.com/containerd/containerd v1.7.6/go.mod h1:SY6lrkkuJT40BVNO37tlYTSnKJnP5AXBc0fhx0q+TJ4= +github.com/containerd/containerd v1.7.9 h1:KOhK01szQbM80YfW1H6RZKh85PHGqY/9OcEZ35Je8sc= +github.com/containerd/containerd v1.7.9/go.mod h1:0/W44LWEYfSHoxBtsHIiNU/duEkgpMokemafHVCpq9Y= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= @@ -208,6 +208,8 @@ github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= @@ -241,8 +243,8 @@ github.com/containernetworking/plugins v1.3.0 h1:QVNXMT6XloyMUoO2wUOqWTC1hWFV62Q github.com/containernetworking/plugins v1.3.0/go.mod h1:Pc2wcedTQQCVuROOOaLBPPxrEXqqXBFt3cZ+/yVg6l0= github.com/containers/buildah v1.32.0 h1:uz5Rcf7lGeStj7iPTBgO4UdhQYZqMMzyt9suDf16k1k= github.com/containers/buildah v1.32.0/go.mod h1:sN3rA3DbnqekNz3bNdkqWduuirYDuMs54LUCOZOomBE= -github.com/containers/common v0.56.0 h1:hysHUsEai1EkMXanU26UV55wMXns/a6AYmaFqJ4fEMY= -github.com/containers/common v0.56.0/go.mod h1:IjaDdfUtcs2CfCcJMZxuut4XlvkTkY9Nlqkso9xCOq4= +github.com/containers/common v0.57.0 h1:5O/+6QUBafKK0/zeok9y1rLPukfWgdE0sT4nuzmyAqk= +github.com/containers/common v0.57.0/go.mod h1:t/Z+/sFrapvFMEJe3YnecN49/Tae2wYEQShbEN6SRaU= 
github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg= github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I= github.com/containers/image/v5 v5.29.0 h1:9+nhS/ZM7c4Kuzu5tJ0NMpxrgoryOJ2HAYTgG8Ny7j4= @@ -360,8 +362,8 @@ github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fsouza/go-dockerclient v1.9.7 h1:FlIrT71E62zwKgRvCvWGdxRD+a/pIy+miY/n3MXgfuw= github.com/fsouza/go-dockerclient v1.9.7/go.mod h1:vx9C32kE2D15yDSOMCDaAEIARZpDQDFBHeqL3MgQy/U= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= @@ -384,7 +386,7 @@ github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNV github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= @@ -750,7 +752,7 @@ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108 github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.12.0 h1:UIVDowFPwpg6yMUpPjGkYvf06K3RAiJXUhCxEwQVHRI= +github.com/onsi/ginkgo/v2 v2.13.1 h1:LNGfMbR2OVGBfXjvRZIZ2YCTQdGKtPLvuI1rMCCj3OU= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -760,7 +762,7 @@ github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= +github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod 
h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -782,8 +784,8 @@ github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.m github.com/opencontainers/runtime-spec v1.1.1-0.20230823135140-4fec88fd00a4 h1:EctkgBjZ1y4q+sibyuuIgiKpa0QSd2elFtSSdNvBVow= github.com/opencontainers/runtime-spec v1.1.1-0.20230823135140-4fec88fd00a4/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= -github.com/opencontainers/runtime-tools v0.9.1-0.20230317050512-e931285f4b69 h1:NL4xDvl68WWqQ+8WPMM3l5PsZTxaT7Z4K3VSKDRuAGs= -github.com/opencontainers/runtime-tools v0.9.1-0.20230317050512-e931285f4b69/go.mod h1:bNpfuSHA3DZRtD0TPWO8LzgtLpFPTVA/3jDkzD/OPyk= +github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc h1:d2hUh5O6MRBvStV55MQ8we08t42zSTqBbscoQccWmMc= +github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc/go.mod h1:8tx1helyqhUC65McMm3x7HmOex8lO2/v9zPuxmKHurs= github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= @@ -860,7 +862,7 @@ github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= @@ -1238,7 +1240,6 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220817070843-5a390386f1f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220823224334-20c2bfdbfe24/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1520,5 +1521,7 @@ sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0/go.mod 
h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +tags.cncf.io/container-device-interface v0.6.2 h1:dThE6dtp/93ZDGhqaED2Pu374SOeUkBfuvkLuiTdwzg= +tags.cncf.io/container-device-interface v0.6.2/go.mod h1:Shusyhjs1A5Na/kqPVLL0KqnHQHuunol9LFeUNkuGVE= diff --git a/vendor/github.com/containerd/containerd/log/context_deprecated.go b/vendor/github.com/containerd/containerd/log/context_deprecated.go new file mode 100644 index 00000000..9e9e8b49 --- /dev/null +++ b/vendor/github.com/containerd/containerd/log/context_deprecated.go @@ -0,0 +1,149 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package log + +import ( + "context" + + "github.com/containerd/log" +) + +// G is a shorthand for [GetLogger]. +// +// Deprecated: use [log.G]. +var G = log.G + +// L is an alias for the standard logger. +// +// Deprecated: use [log.L]. +var L = log.L + +// Fields type to pass to "WithFields". +// +// Deprecated: use [log.Fields]. +type Fields = log.Fields + +// Entry is a logging entry. +// +// Deprecated: use [log.Entry]. +type Entry = log.Entry + +// RFC3339NanoFixed is [time.RFC3339Nano] with nanoseconds padded using +// zeros to ensure the formatted time is always the same number of +// characters. +// +// Deprecated: use [log.RFC3339NanoFixed]. +const RFC3339NanoFixed = log.RFC3339NanoFixed + +// Level is a logging level. +// +// Deprecated: use [log.Level]. +type Level = log.Level + +// Supported log levels. +const ( + // TraceLevel level. + // + // Deprecated: use [log.TraceLevel]. + TraceLevel Level = log.TraceLevel + + // DebugLevel level. + // + // Deprecated: use [log.DebugLevel]. + DebugLevel Level = log.DebugLevel + + // InfoLevel level. + // + // Deprecated: use [log.InfoLevel]. + InfoLevel Level = log.InfoLevel + + // WarnLevel level. + // + // Deprecated: use [log.WarnLevel]. + WarnLevel Level = log.WarnLevel + + // ErrorLevel level + // + // Deprecated: use [log.ErrorLevel]. + ErrorLevel Level = log.ErrorLevel + + // FatalLevel level. + // + // Deprecated: use [log.FatalLevel]. + FatalLevel Level = log.FatalLevel + + // PanicLevel level. + // + // Deprecated: use [log.PanicLevel]. + PanicLevel Level = log.PanicLevel +) + +// SetLevel sets log level globally. It returns an error if the given +// level is not supported. +// +// Deprecated: use [log.SetLevel]. +func SetLevel(level string) error { + return log.SetLevel(level) +} + +// GetLevel returns the current log level. +// +// Deprecated: use [log.GetLevel]. +func GetLevel() log.Level { + return log.GetLevel() +} + +// OutputFormat specifies a log output format. +// +// Deprecated: use [log.OutputFormat]. +type OutputFormat = log.OutputFormat + +// Supported log output formats. 
+const ( + // TextFormat represents the text logging format. + // + // Deprecated: use [log.TextFormat]. + TextFormat log.OutputFormat = "text" + + // JSONFormat represents the JSON logging format. + // + // Deprecated: use [log.JSONFormat]. + JSONFormat log.OutputFormat = "json" +) + +// SetFormat sets the log output format. +// +// Deprecated: use [log.SetFormat]. +func SetFormat(format OutputFormat) error { + return log.SetFormat(format) +} + +// WithLogger returns a new context with the provided logger. Use in +// combination with logger.WithField(s) for great effect. +// +// Deprecated: use [log.WithLogger]. +func WithLogger(ctx context.Context, logger *log.Entry) context.Context { + return log.WithLogger(ctx, logger) +} + +// GetLogger retrieves the current logger from the context. If no logger is +// available, the default logger is returned. +// +// Deprecated: use [log.GetLogger]. +func GetLogger(ctx context.Context) *log.Entry { + return log.GetLogger(ctx) +} diff --git a/vendor/github.com/containerd/log/.golangci.yml b/vendor/github.com/containerd/log/.golangci.yml new file mode 100644 index 00000000..a695775d --- /dev/null +++ b/vendor/github.com/containerd/log/.golangci.yml @@ -0,0 +1,30 @@ +linters: + enable: + - exportloopref # Checks for pointers to enclosing loop variables + - gofmt + - goimports + - gosec + - ineffassign + - misspell + - nolintlint + - revive + - staticcheck + - tenv # Detects using os.Setenv instead of t.Setenv since Go 1.17 + - unconvert + - unused + - vet + - dupword # Checks for duplicate words in the source code + disable: + - errcheck + +run: + timeout: 5m + skip-dirs: + - api + - cluster + - design + - docs + - docs/man + - releases + - reports + - test # e2e scripts diff --git a/vendor/github.com/containerd/log/LICENSE b/vendor/github.com/containerd/log/LICENSE new file mode 100644 index 00000000..584149b6 --- /dev/null +++ b/vendor/github.com/containerd/log/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright The containerd Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/log/README.md b/vendor/github.com/containerd/log/README.md new file mode 100644 index 00000000..00e08498 --- /dev/null +++ b/vendor/github.com/containerd/log/README.md @@ -0,0 +1,17 @@ +# log + +A Go package providing a common logging interface across containerd repositories and a way for clients to use and configure logging in containerd packages. + +This package is not intended to be used as a standalone logging package outside of the containerd ecosystem and is intended as an interface wrapper around a logging implementation. +In the future this package may be replaced with a common go logging interface. + +## Project details + +**log** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). +As a containerd sub-project, you will find the: + * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md), + * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS), + * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md) + +information in our [`containerd/project`](https://github.com/containerd/project) repository. 
+ diff --git a/vendor/github.com/containerd/containerd/log/context.go b/vendor/github.com/containerd/log/context.go similarity index 100% rename from vendor/github.com/containerd/containerd/log/context.go rename to vendor/github.com/containerd/log/context.go diff --git a/vendor/github.com/containers/common/internal/attributedstring/slice.go b/vendor/github.com/containers/common/internal/attributedstring/slice.go new file mode 100644 index 00000000..ad4acc5e --- /dev/null +++ b/vendor/github.com/containers/common/internal/attributedstring/slice.go @@ -0,0 +1,102 @@ +package attributedstring + +import ( + "bytes" + "fmt" + + "github.com/BurntSushi/toml" +) + +// Slice allows for extending a TOML string array with custom +// attributes that control how the array is marshaled into a Go string. +// +// Specifically, an Slice can be configured to avoid it being +// overridden by a subsequent unmarshal sequence. When the `append` attribute +// is specified, the array will be appended instead (e.g., `array=["9", +// {append=true}]`). +type Slice struct { // A "mixed-type array" in TOML. + // Note that the fields below _must_ be exported. Otherwise the TOML + // encoder would fail during type reflection. + Values []string + Attributes struct { // Using a struct allows for adding more attributes in the future. + Append *bool // Nil if not set by the user + } +} + +// NewSlice creates a new slice with the specified values. +func NewSlice(values []string) Slice { + return Slice{Values: values} +} + +// Get returns the Slice values or an empty string slice. +func (a *Slice) Get() []string { + if a.Values == nil { + return []string{} + } + return a.Values +} + +// Set overrides the values of the Slice. +func (a *Slice) Set(values []string) { + a.Values = values +} + +// UnmarshalTOML is the custom unmarshal method for Slice. +func (a *Slice) UnmarshalTOML(data interface{}) error { + iFaceSlice, ok := data.([]interface{}) + if !ok { + return fmt.Errorf("unable to cast to interface array: %v", data) + } + + var loadedStrings []string + for _, x := range iFaceSlice { // Iterate over each item in the slice. + switch val := x.(type) { + case string: // Strings are directly appended to the slice. + loadedStrings = append(loadedStrings, val) + case map[string]interface{}: // The attribute struct is represented as a map. + for k, v := range val { // Iterate over all _supported_ keys. + switch k { + case "append": + boolVal, ok := v.(bool) + if !ok { + return fmt.Errorf("unable to cast append to bool: %v", k) + } + a.Attributes.Append = &boolVal + default: // Unsupported map key. + return fmt.Errorf("unsupported key %q in map: %v", k, val) + } + } + default: // Unsupported item. + return fmt.Errorf("unsupported item in attributed string slice: %v", x) + } + } + + if a.Attributes.Append != nil && *a.Attributes.Append { // If _explicitly_ configured, append the loaded slice. + a.Values = append(a.Values, loadedStrings...) + } else { // Default: override the existing Slice. + a.Values = loadedStrings + } + return nil +} + +// MarshalTOML is the custom marshal method for Slice. 
+func (a *Slice) MarshalTOML() ([]byte, error) { + iFaceSlice := make([]interface{}, 0, len(a.Values)) + + for _, x := range a.Values { + iFaceSlice = append(iFaceSlice, x) + } + + if a.Attributes.Append != nil { + Attributes := make(map[string]any) + Attributes["append"] = *a.Attributes.Append + iFaceSlice = append(iFaceSlice, Attributes) + } + + buf := new(bytes.Buffer) + enc := toml.NewEncoder(buf) + if err := enc.Encode(iFaceSlice); err != nil { + return nil, err + } + return buf.Bytes(), nil +} diff --git a/vendor/github.com/containers/common/libimage/copier.go b/vendor/github.com/containers/common/libimage/copier.go index 7a6f1f1b..d6acc732 100644 --- a/vendor/github.com/containers/common/libimage/copier.go +++ b/vendor/github.com/containers/common/libimage/copier.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( @@ -11,6 +14,7 @@ import ( "time" "github.com/containers/common/libimage/manifests" + "github.com/containers/common/libimage/platform" "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/retry" "github.com/containers/image/v5/copy" @@ -72,7 +76,7 @@ type CopyOptions struct { // Default 3. MaxRetries *uint // RetryDelay used for the exponential back off of MaxRetries. - // Default 1 time.Scond. + // Default 1 time.Second. RetryDelay *time.Duration // ManifestMIMEType is the desired media type the image will be // converted to if needed. Note that it must contain the exact MIME @@ -239,7 +243,7 @@ func (r *Runtime) newCopier(options *CopyOptions) (*copier, error) { c.systemContext.DockerArchiveAdditionalTags = options.dockerArchiveAdditionalTags - c.systemContext.OSChoice, c.systemContext.ArchitectureChoice, c.systemContext.VariantChoice = NormalizePlatform(options.OS, options.Architecture, options.Variant) + c.systemContext.OSChoice, c.systemContext.ArchitectureChoice, c.systemContext.VariantChoice = platform.Normalize(options.OS, options.Architecture, options.Variant) if options.SignaturePolicyPath != "" { c.systemContext.SignaturePolicyPath = options.SignaturePolicyPath @@ -360,7 +364,11 @@ func (c *copier) copy(ctx context.Context, source, destination types.ImageRefere defer cancel() defer timer.Stop() - fmt.Fprintf(c.imageCopyOptions.ReportWriter, "Pulling image %s inside systemd: setting pull timeout to %s\n", source.DockerReference(), time.Duration(numExtensions)*extension) + fmt.Fprintf(c.imageCopyOptions.ReportWriter, + "Pulling image %s inside systemd: setting pull timeout to %s\n", + source.StringWithinTransport(), + time.Duration(numExtensions)*extension, + ) // From `man systemd.service(5)`: // diff --git a/vendor/github.com/containers/common/libimage/define/platform.go b/vendor/github.com/containers/common/libimage/define/platform.go new file mode 100644 index 00000000..7e13abff --- /dev/null +++ b/vendor/github.com/containers/common/libimage/define/platform.go @@ -0,0 +1,11 @@ +package define + +// PlatformPolicy controls the behavior of image-platform matching. +type PlatformPolicy int + +const ( + // Only debug log if an image does not match the expected platform. + PlatformPolicyDefault PlatformPolicy = iota + // Warn if an image does not match the expected platform. 
+ PlatformPolicyWarn +) diff --git a/vendor/github.com/containers/common/libimage/disk_usage.go b/vendor/github.com/containers/common/libimage/disk_usage.go index f27edc03..765b0df8 100644 --- a/vendor/github.com/containers/common/libimage/disk_usage.go +++ b/vendor/github.com/containers/common/libimage/disk_usage.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( diff --git a/vendor/github.com/containers/common/libimage/events.go b/vendor/github.com/containers/common/libimage/events.go index c7733564..5d82efa6 100644 --- a/vendor/github.com/containers/common/libimage/events.go +++ b/vendor/github.com/containers/common/libimage/events.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( diff --git a/vendor/github.com/containers/common/libimage/filters.go b/vendor/github.com/containers/common/libimage/filters.go index fdde5e83..b51853af 100644 --- a/vendor/github.com/containers/common/libimage/filters.go +++ b/vendor/github.com/containers/common/libimage/filters.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( @@ -11,7 +14,6 @@ import ( filtersPkg "github.com/containers/common/pkg/filters" "github.com/containers/common/pkg/timetype" "github.com/containers/image/v5/docker/reference" - "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" ) @@ -290,7 +292,7 @@ func filterReferences(r *Runtime, value string) filterFunc { refString := ref.String() // FQN with tag/digest candidates := []string{refString} - // Split the reference into 3 components (twice if diggested/tagged): + // Split the reference into 3 components (twice if digested/tagged): // 1) Fully-qualified reference // 2) Without domain // 3) Without domain and path @@ -394,18 +396,17 @@ func filterDangling(ctx context.Context, value bool, tree *layerTree) filterFunc // filterID creates an image-ID filter for matching the specified value. func filterID(value string) filterFunc { return func(img *Image) (bool, error) { - return img.ID() == value, nil + return strings.HasPrefix(img.ID(), value), nil } } // filterDigest creates a digest filter for matching the specified value. func filterDigest(value string) (filterFunc, error) { - d, err := digest.Parse(value) - if err != nil { - return nil, fmt.Errorf("invalid value %q for digest filter: %w", value, err) + if !strings.HasPrefix(value, "sha256:") { + return nil, fmt.Errorf("invalid value %q for digest filter", value) } return func(img *Image) (bool, error) { - return img.hasDigest(d), nil + return img.containsDigestPrefix(value), nil }, nil } diff --git a/vendor/github.com/containers/common/libimage/history.go b/vendor/github.com/containers/common/libimage/history.go index ad989b52..ccd81096 100644 --- a/vendor/github.com/containers/common/libimage/history.go +++ b/vendor/github.com/containers/common/libimage/history.go @@ -1,10 +1,12 @@ +//go:build !remote +// +build !remote + package libimage import ( "context" + "fmt" "time" - - "github.com/containers/storage" ) // ImageHistory contains the history information of an image. 
@@ -29,17 +31,19 @@ func (i *Image) History(ctx context.Context) ([]ImageHistory, error) { return nil, err } - var allHistory []ImageHistory - var layer *storage.Layer + var nextNode *layerNode if i.TopLayer() != "" { - layer, err = i.runtime.store.Layer(i.TopLayer()) + layer, err := i.runtime.store.Layer(i.TopLayer()) if err != nil { return nil, err } + nextNode = layerTree.node(layer.ID) } // Iterate in reverse order over the history entries, and lookup the - // corresponding image ID, size and get the next later if needed. + // corresponding image ID, size. If it's a non-empty history entry, + // pick the next "storage" layer by walking the layer tree. + var allHistory []ImageHistory numHistories := len(ociImage.History) - 1 usedIDs := make(map[string]bool) // prevents assigning images IDs more than once for x := numHistories; x >= 0; x-- { @@ -50,31 +54,25 @@ func (i *Image) History(ctx context.Context) ([]ImageHistory, error) { Comment: ociImage.History[x].Comment, } - if layer != nil { - if !ociImage.History[x].EmptyLayer { - history.Size = layer.UncompressedSize + if nextNode != nil && len(nextNode.images) > 0 { + id := nextNode.images[0].ID() // always use the first one + if _, used := usedIDs[id]; !used { + history.ID = id + usedIDs[id] = true } - // Query the layer tree if it's the top layer of an - // image. - node := layerTree.node(layer.ID) - if len(node.images) > 0 { - id := node.images[0].ID() // always use the first one - if _, used := usedIDs[id]; !used { - history.ID = id - usedIDs[id] = true - } - for i := range node.images { - history.Tags = append(history.Tags, node.images[i].Names()...) - } + for i := range nextNode.images { + history.Tags = append(history.Tags, nextNode.images[i].Names()...) } - if layer.Parent == "" { - layer = nil - } else if !ociImage.History[x].EmptyLayer { - layer, err = i.runtime.store.Layer(layer.Parent) - if err != nil { - return nil, err - } + } + + if !ociImage.History[x].EmptyLayer { + if nextNode == nil { // If no layer's left, something's wrong. + return nil, fmt.Errorf("no layer left for non-empty history entry: %v", history) } + + history.Size = nextNode.layer.UncompressedSize + + nextNode = nextNode.parent } allHistory = append(allHistory, history) diff --git a/vendor/github.com/containers/common/libimage/image.go b/vendor/github.com/containers/common/libimage/image.go index 640968fd..4d106d42 100644 --- a/vendor/github.com/containers/common/libimage/image.go +++ b/vendor/github.com/containers/common/libimage/image.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( @@ -9,6 +12,8 @@ import ( "strings" "time" + "github.com/containerd/containerd/platforms" + "github.com/containers/common/libimage/platform" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/manifest" storageTransport "github.com/containers/image/v5/storage" @@ -169,6 +174,17 @@ func (i *Image) hasDigest(wantedDigest digest.Digest) bool { return false } +// containsDigestPrefix returns whether the specified value matches any digest of the +// image. It checks for the prefix and not a full match. +func (i *Image) containsDigestPrefix(wantedDigestPrefix string) bool { + for _, d := range i.Digests() { + if strings.HasPrefix(d.String(), wantedDigestPrefix) { + return true + } + } + return false +} + // IsReadOnly returns whether the image is set read only. 
func (i *Image) IsReadOnly() bool { return i.storageImage.ReadOnly @@ -567,8 +583,7 @@ func (i *Image) Tag(name string) error { defer i.runtime.writeEvent(&Event{ID: i.ID(), Name: name, Time: time.Now(), Type: EventTypeImageTag}) } - newNames := append(i.Names(), ref.String()) - if err := i.runtime.store.SetNames(i.ID(), newNames); err != nil { + if err := i.runtime.store.AddNames(i.ID(), []string{ref.String()}); err != nil { return err } @@ -592,7 +607,7 @@ func (i *Image) Untag(name string) error { ref, err := NormalizeName(name) if err != nil { - return fmt.Errorf("normalizing name %q: %w", name, err) + return err } // FIXME: this is breaking Podman CI but must be re-enabled once @@ -607,26 +622,25 @@ func (i *Image) Untag(name string) error { name = ref.String() - logrus.Debugf("Untagging %q from image %s", ref.String(), i.ID()) - if i.runtime.eventChannel != nil { - defer i.runtime.writeEvent(&Event{ID: i.ID(), Name: name, Time: time.Now(), Type: EventTypeImageUntag}) - } - - removedName := false - newNames := []string{} + foundName := false for _, n := range i.Names() { if n == name { - removedName = true - continue + foundName = true + break } - newNames = append(newNames, n) } - - if !removedName { + // Return an error if the name is not found, the c/storage + // RemoveNames() API does not create one if no match is found. + if !foundName { return fmt.Errorf("%s: %w", name, errTagUnknown) } - if err := i.runtime.store.SetNames(i.ID(), newNames); err != nil { + logrus.Debugf("Untagging %q from image %s", ref.String(), i.ID()) + if i.runtime.eventChannel != nil { + defer i.runtime.writeEvent(&Event{ID: i.ID(), Name: name, Time: time.Now(), Type: EventTypeImageUntag}) + } + + if err := i.runtime.store.RemoveNames(i.ID(), []string{name}); err != nil { return err } @@ -1010,3 +1024,35 @@ func getImageID(ctx context.Context, src types.ImageReference, sys *types.System } return "@" + imageDigest.Encoded(), nil } + +// Checks whether the image matches the specified platform. +// Returns +// - 1) a matching error that can be used for logging (or returning) what does not match +// - 2) a bool indicating whether architecture, os or variant were set (some callers need that to decide whether they need to throw an error) +// - 3) a fatal error that occurred prior to check for matches (e.g., storage errors etc.) 
+func (i *Image) matchesPlatform(ctx context.Context, os, arch, variant string) (error, bool, error) { + if err := i.isCorrupted(""); err != nil { + return err, false, nil + } + inspectInfo, err := i.inspectInfo(ctx) + if err != nil { + return nil, false, fmt.Errorf("inspecting image: %w", err) + } + + customPlatform := len(os)+len(arch)+len(variant) != 0 + + expected, err := platforms.Parse(platform.ToString(os, arch, variant)) + if err != nil { + return nil, false, fmt.Errorf("parsing host platform: %v", err) + } + fromImage, err := platforms.Parse(platform.ToString(inspectInfo.Os, inspectInfo.Architecture, inspectInfo.Variant)) + if err != nil { + return nil, false, fmt.Errorf("parsing image platform: %v", err) + } + + if platforms.NewMatcher(expected).Match(fromImage) { + return nil, customPlatform, nil + } + + return fmt.Errorf("image platform (%s) does not match the expected platform (%s)", platforms.Format(fromImage), platforms.Format(expected)), customPlatform, nil +} diff --git a/vendor/github.com/containers/common/libimage/image_config.go b/vendor/github.com/containers/common/libimage/image_config.go index 69b7debd..9f5841fe 100644 --- a/vendor/github.com/containers/common/libimage/image_config.go +++ b/vendor/github.com/containers/common/libimage/image_config.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( diff --git a/vendor/github.com/containers/common/libimage/image_tree.go b/vendor/github.com/containers/common/libimage/image_tree.go index 9c958ce6..8143d377 100644 --- a/vendor/github.com/containers/common/libimage/image_tree.go +++ b/vendor/github.com/containers/common/libimage/image_tree.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( diff --git a/vendor/github.com/containers/common/libimage/import.go b/vendor/github.com/containers/common/libimage/import.go index b276c365..5519f02b 100644 --- a/vendor/github.com/containers/common/libimage/import.go +++ b/vendor/github.com/containers/common/libimage/import.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( diff --git a/vendor/github.com/containers/common/libimage/inspect.go b/vendor/github.com/containers/common/libimage/inspect.go index c6632d9a..1003b648 100644 --- a/vendor/github.com/containers/common/libimage/inspect.go +++ b/vendor/github.com/containers/common/libimage/inspect.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( diff --git a/vendor/github.com/containers/common/libimage/layer_tree.go b/vendor/github.com/containers/common/libimage/layer_tree.go index e6b012f9..71eafb0e 100644 --- a/vendor/github.com/containers/common/libimage/layer_tree.go +++ b/vendor/github.com/containers/common/libimage/layer_tree.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( @@ -199,6 +202,17 @@ func (t *layerTree) children(ctx context.Context, parent *Image, all bool) ([]*I if parent.TopLayer() == "" { for i := range t.emptyImages { empty := t.emptyImages[i] + isManifest, err := empty.IsManifestList(ctx) + if err != nil { + return nil, err + } + if isManifest { + // If this is a manifest list and is already + // marked as empty then no instance can be + // selected from this list therefore its + // better to skip this. 
+ continue + } isParent, err := checkParent(empty) if err != nil { return nil, err @@ -289,6 +303,17 @@ func (t *layerTree) parent(ctx context.Context, child *Image) (*Image, error) { if childID == empty.ID() { continue } + isManifest, err := empty.IsManifestList(ctx) + if err != nil { + return nil, err + } + if isManifest { + // If this is a manifest list and is already + // marked as empty then no instance can be + // selected from this list therefore its + // better to skip this. + continue + } emptyOCI, err := t.toOCI(ctx, empty) if err != nil { if ErrorIsImageUnknown(err) { diff --git a/vendor/github.com/containers/common/libimage/load.go b/vendor/github.com/containers/common/libimage/load.go index 72c56f12..36283a99 100644 --- a/vendor/github.com/containers/common/libimage/load.go +++ b/vendor/github.com/containers/common/libimage/load.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( diff --git a/vendor/github.com/containers/common/libimage/manifest_list.go b/vendor/github.com/containers/common/libimage/manifest_list.go index 83989236..c36bfda9 100644 --- a/vendor/github.com/containers/common/libimage/manifest_list.go +++ b/vendor/github.com/containers/common/libimage/manifest_list.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( @@ -440,6 +443,8 @@ func (m *ManifestList) Push(ctx context.Context, destination string, options *Ma SignSigstorePrivateKeyPassphrase: options.SignSigstorePrivateKeyPassphrase, RemoveSignatures: options.RemoveSignatures, ManifestType: options.ManifestMIMEType, + MaxRetries: options.MaxRetries, + RetryDelay: options.RetryDelay, ForceCompressionFormat: options.ForceCompressionFormat, } diff --git a/vendor/github.com/containers/common/libimage/manifests/manifests.go b/vendor/github.com/containers/common/libimage/manifests/manifests.go index 3c066e04..d28ac87b 100644 --- a/vendor/github.com/containers/common/libimage/manifests/manifests.go +++ b/vendor/github.com/containers/common/libimage/manifests/manifests.go @@ -4,11 +4,12 @@ import ( "context" "encoding/json" "errors" - stderrors "errors" "fmt" "io" + "time" "github.com/containers/common/pkg/manifests" + "github.com/containers/common/pkg/retry" "github.com/containers/common/pkg/supplemented" cp "github.com/containers/image/v5/copy" "github.com/containers/image/v5/docker/reference" @@ -28,6 +29,10 @@ import ( "github.com/sirupsen/logrus" ) +const ( + defaultMaxRetries = 3 +) + const instancesData = "instances.json" // LookupReferenceFunc return an image reference based on the specified one. @@ -38,7 +43,7 @@ type LookupReferenceFunc func(ref types.ImageReference) (types.ImageReference, e // ErrListImageUnknown is returned when we attempt to create an image reference // for a List that has not yet been saved to an image. -var ErrListImageUnknown = stderrors.New("unable to determine which image holds the manifest list") +var ErrListImageUnknown = errors.New("unable to determine which image holds the manifest list") type list struct { manifests.List @@ -73,6 +78,11 @@ type PushOptions struct { SourceFilter LookupReferenceFunc // filter the list source AddCompression []string // add existing instances with requested compression algorithms to manifest list ForceCompressionFormat bool // force push with requested compression ignoring the blobs which can be reused. + // Maximum number of retries with exponential backoff when facing + // transient network errors. Default 3. 
+ MaxRetries *uint + // RetryDelay used for the exponential back off of MaxRetries. + RetryDelay *time.Duration } // Create creates a new list containing information about the specified image, @@ -263,16 +273,31 @@ func (l *list) Push(ctx context.Context, dest types.ImageReference, options Push ForceCompressionFormat: options.ForceCompressionFormat, } - // Copy whatever we were asked to copy. - manifestBytes, err := cp.Image(ctx, policyContext, dest, src, copyOptions) - if err != nil { - return nil, "", err + retryOptions := retry.Options{} + retryOptions.MaxRetry = defaultMaxRetries + if options.MaxRetries != nil { + retryOptions.MaxRetry = int(*options.MaxRetries) } - manifestDigest, err := manifest.Digest(manifestBytes) - if err != nil { - return nil, "", err + if options.RetryDelay != nil { + retryOptions.Delay = *options.RetryDelay + } + + // Copy whatever we were asked to copy. + var manifestDigest digest.Digest + f := func() error { + opts := copyOptions + var manifestBytes []byte + var digest digest.Digest + var err error + if manifestBytes, err = cp.Image(ctx, policyContext, dest, src, opts); err == nil { + if digest, err = manifest.Digest(manifestBytes); err == nil { + manifestDigest = digest + } + } + return err } - return nil, manifestDigest, nil + err = retry.IfNecessary(ctx, f, &retryOptions) + return nil, manifestDigest, err } func prepareAddWithCompression(variants []string) ([]cp.OptionCompressionVariant, error) { diff --git a/vendor/github.com/containers/common/libimage/normalize.go b/vendor/github.com/containers/common/libimage/normalize.go index 9619b1a0..2b340286 100644 --- a/vendor/github.com/containers/common/libimage/normalize.go +++ b/vendor/github.com/containers/common/libimage/normalize.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( diff --git a/vendor/github.com/containers/common/libimage/oci.go b/vendor/github.com/containers/common/libimage/oci.go index b88d6613..fcbd10ad 100644 --- a/vendor/github.com/containers/common/libimage/oci.go +++ b/vendor/github.com/containers/common/libimage/oci.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( diff --git a/vendor/github.com/containers/common/libimage/platform.go b/vendor/github.com/containers/common/libimage/platform.go index 06c15ee6..c378bc27 100644 --- a/vendor/github.com/containers/common/libimage/platform.go +++ b/vendor/github.com/containers/common/libimage/platform.go @@ -1,100 +1,29 @@ +//go:build !remote +// +build !remote + package libimage import ( - "context" - "fmt" - "runtime" - - "github.com/containerd/containerd/platforms" - v1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sirupsen/logrus" + "github.com/containers/common/libimage/define" + "github.com/containers/common/libimage/platform" ) // PlatformPolicy controls the behavior of image-platform matching. -type PlatformPolicy int +// Deprecated: new code should use define.PlatformPolicy directly. +type PlatformPolicy = define.PlatformPolicy const ( // Only debug log if an image does not match the expected platform. - PlatformPolicyDefault PlatformPolicy = iota + // Deprecated: new code should reference define.PlatformPolicyDefault directly. + PlatformPolicyDefault = define.PlatformPolicyDefault // Warn if an image does not match the expected platform. - PlatformPolicyWarn + // Deprecated: new code should reference define.PlatformPolicyWarn directly. 
+ PlatformPolicyWarn = define.PlatformPolicyWarn ) // NormalizePlatform normalizes (according to the OCI spec) the specified os, -// arch and variant. If left empty, the individual item will be normalized. +// arch and variant. If left empty, the individual item will be normalized. +// Deprecated: new code should call libimage/platform.Normalize() instead. func NormalizePlatform(rawOS, rawArch, rawVariant string) (os, arch, variant string) { - platformSpec := v1.Platform{ - OS: rawOS, - Architecture: rawArch, - Variant: rawVariant, - } - normalizedSpec := platforms.Normalize(platformSpec) - if normalizedSpec.Variant == "" && rawVariant != "" { - normalizedSpec.Variant = rawVariant - } - rawPlatform := toPlatformString(normalizedSpec.OS, normalizedSpec.Architecture, normalizedSpec.Variant) - normalizedPlatform, err := platforms.Parse(rawPlatform) - if err != nil { - logrus.Debugf("Error normalizing platform: %v", err) - return rawOS, rawArch, rawVariant - } - logrus.Debugf("Normalized platform %s to %s", rawPlatform, normalizedPlatform) - os = rawOS - if rawOS != "" { - os = normalizedPlatform.OS - } - arch = rawArch - if rawArch != "" { - arch = normalizedPlatform.Architecture - } - variant = rawVariant - if rawVariant != "" || (rawVariant == "" && normalizedPlatform.Variant != "") { - variant = normalizedPlatform.Variant - } - return os, arch, variant -} - -func toPlatformString(os, arch, variant string) string { - if os == "" { - os = runtime.GOOS - } - if arch == "" { - arch = runtime.GOARCH - } - if variant == "" { - return fmt.Sprintf("%s/%s", os, arch) - } - return fmt.Sprintf("%s/%s/%s", os, arch, variant) -} - -// Checks whether the image matches the specified platform. -// Returns -// - 1) a matching error that can be used for logging (or returning) what does not match -// - 2) a bool indicating whether architecture, os or variant were set (some callers need that to decide whether they need to throw an error) -// - 3) a fatal error that occurred prior to check for matches (e.g., storage errors etc.) 
-func (i *Image) matchesPlatform(ctx context.Context, os, arch, variant string) (error, bool, error) { - if err := i.isCorrupted(""); err != nil { - return err, false, nil - } - inspectInfo, err := i.inspectInfo(ctx) - if err != nil { - return nil, false, fmt.Errorf("inspecting image: %w", err) - } - - customPlatform := len(os)+len(arch)+len(variant) != 0 - - expected, err := platforms.Parse(toPlatformString(os, arch, variant)) - if err != nil { - return nil, false, fmt.Errorf("parsing host platform: %v", err) - } - fromImage, err := platforms.Parse(toPlatformString(inspectInfo.Os, inspectInfo.Architecture, inspectInfo.Variant)) - if err != nil { - return nil, false, fmt.Errorf("parsing image platform: %v", err) - } - - if platforms.NewMatcher(expected).Match(fromImage) { - return nil, customPlatform, nil - } - - return fmt.Errorf("image platform (%s) does not match the expected platform (%s)", platforms.Format(fromImage), platforms.Format(expected)), customPlatform, nil + return platform.Normalize(rawOS, rawArch, rawVariant) } diff --git a/vendor/github.com/containers/common/libimage/platform/platform.go b/vendor/github.com/containers/common/libimage/platform/platform.go new file mode 100644 index 00000000..2272cee7 --- /dev/null +++ b/vendor/github.com/containers/common/libimage/platform/platform.go @@ -0,0 +1,57 @@ +package platform + +import ( + "fmt" + "runtime" + + "github.com/containerd/containerd/platforms" + v1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" +) + +// Normalize normalizes (according to the OCI spec) the specified os, +// arch and variant. If left empty, the individual item will be normalized. +func Normalize(rawOS, rawArch, rawVariant string) (os, arch, variant string) { + platformSpec := v1.Platform{ + OS: rawOS, + Architecture: rawArch, + Variant: rawVariant, + } + normalizedSpec := platforms.Normalize(platformSpec) + if normalizedSpec.Variant == "" && rawVariant != "" { + normalizedSpec.Variant = rawVariant + } + rawPlatform := ToString(normalizedSpec.OS, normalizedSpec.Architecture, normalizedSpec.Variant) + normalizedPlatform, err := platforms.Parse(rawPlatform) + if err != nil { + logrus.Debugf("Error normalizing platform: %v", err) + return rawOS, rawArch, rawVariant + } + logrus.Debugf("Normalized platform %s to %s", rawPlatform, normalizedPlatform) + os = rawOS + if rawOS != "" { + os = normalizedPlatform.OS + } + arch = rawArch + if rawArch != "" { + arch = normalizedPlatform.Architecture + } + variant = rawVariant + if rawVariant != "" || (rawVariant == "" && normalizedPlatform.Variant != "") { + variant = normalizedPlatform.Variant + } + return os, arch, variant +} + +func ToString(os, arch, variant string) string { + if os == "" { + os = runtime.GOOS + } + if arch == "" { + arch = runtime.GOARCH + } + if variant == "" { + return fmt.Sprintf("%s/%s", os, arch) + } + return fmt.Sprintf("%s/%s/%s", os, arch, variant) +} diff --git a/vendor/github.com/containers/common/libimage/pull.go b/vendor/github.com/containers/common/libimage/pull.go index 579eae2c..bc8e8498 100644 --- a/vendor/github.com/containers/common/libimage/pull.go +++ b/vendor/github.com/containers/common/libimage/pull.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( diff --git a/vendor/github.com/containers/common/libimage/push.go b/vendor/github.com/containers/common/libimage/push.go index 7203838a..ed1d90c1 100644 --- a/vendor/github.com/containers/common/libimage/push.go +++ 
b/vendor/github.com/containers/common/libimage/push.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( diff --git a/vendor/github.com/containers/common/libimage/runtime.go b/vendor/github.com/containers/common/libimage/runtime.go index 6d90272c..1948fe0a 100644 --- a/vendor/github.com/containers/common/libimage/runtime.go +++ b/vendor/github.com/containers/common/libimage/runtime.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( @@ -7,6 +10,8 @@ import ( "os" "strings" + "github.com/containers/common/libimage/define" + "github.com/containers/common/libimage/platform" "github.com/containers/common/pkg/config" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/pkg/shortnames" @@ -184,7 +189,7 @@ type LookupImageOptions struct { Variant string // Controls the behavior when checking the platform of an image. - PlatformPolicy PlatformPolicy + PlatformPolicy define.PlatformPolicy // If set, do not look for items/instances in the manifest list that // match the current platform but return the manifest list as is. @@ -283,7 +288,7 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image, options.Variant = r.systemContext.VariantChoice } // Normalize platform to be OCI compatible (e.g., "aarch64" -> "arm64"). - options.OS, options.Architecture, options.Variant = NormalizePlatform(options.OS, options.Architecture, options.Variant) + options.OS, options.Architecture, options.Variant = platform.Normalize(options.OS, options.Architecture, options.Variant) // Second, try out the candidates as resolved by shortnames. This takes // "localhost/" prefixed images into account as well. @@ -435,9 +440,9 @@ func (r *Runtime) lookupImageInLocalStorage(name, candidate string, namedCandida return nil, nil } switch options.PlatformPolicy { - case PlatformPolicyDefault: + case define.PlatformPolicyDefault: logrus.Debugf("%v", matchError) - case PlatformPolicyWarn: + case define.PlatformPolicyWarn: logrus.Warnf("%v", matchError) } } diff --git a/vendor/github.com/containers/common/libimage/save.go b/vendor/github.com/containers/common/libimage/save.go index a42bbb49..47a3a566 100644 --- a/vendor/github.com/containers/common/libimage/save.go +++ b/vendor/github.com/containers/common/libimage/save.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( diff --git a/vendor/github.com/containers/common/libimage/search.go b/vendor/github.com/containers/common/libimage/search.go index 618850e6..9ef0e832 100644 --- a/vendor/github.com/containers/common/libimage/search.go +++ b/vendor/github.com/containers/common/libimage/search.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package libimage import ( diff --git a/vendor/github.com/containers/common/libnetwork/cni/network.go b/vendor/github.com/containers/common/libnetwork/cni/network.go index 8180b49b..49d20b91 100644 --- a/vendor/github.com/containers/common/libnetwork/cni/network.go +++ b/vendor/github.com/containers/common/libnetwork/cni/network.go @@ -9,6 +9,7 @@ import ( "encoding/hex" "errors" "fmt" + "io/fs" "os" "path/filepath" "strings" @@ -17,7 +18,7 @@ import ( "github.com/containernetworking/cni/libcni" "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/config" - cutil "github.com/containers/common/pkg/util" + "github.com/containers/common/pkg/version" "github.com/containers/storage/pkg/lockfile" "github.com/containers/storage/pkg/unshare" 
"github.com/sirupsen/logrus" @@ -201,7 +202,10 @@ func (n *cniNetwork) loadNetworks() error { net, err := createNetworkFromCNIConfigList(conf, file) if err != nil { - logrus.Errorf("CNI config list %s could not be converted to a libpod config, skipping: %v", file, err) + // ignore ENOENT as the config has been removed in the meantime so we can just ignore this case + if !errors.Is(err, fs.ErrNotExist) { + logrus.Errorf("CNI config list %s could not be converted to a libpod config, skipping: %v", file, err) + } continue } logrus.Debugf("Successfully loaded network %s: %v", net.Name, net) @@ -302,8 +306,8 @@ func (n *cniNetwork) NetworkInfo() types.NetworkInfo { path := "" packageVersion := "" for _, p := range n.cniPluginDirs { - ver := cutil.PackageVersion(p) - if ver != cutil.UnknownPackage { + ver := version.Package(p) + if ver != version.UnknownPackage { path = p packageVersion = ver break @@ -317,8 +321,8 @@ func (n *cniNetwork) NetworkInfo() types.NetworkInfo { } dnsPath := filepath.Join(path, "dnsname") - dnsPackage := cutil.PackageVersion(dnsPath) - dnsProgram, err := cutil.ProgramVersionDnsname(dnsPath) + dnsPackage := version.Package(dnsPath) + dnsProgram, err := version.ProgramDnsname(dnsPath) if err != nil { logrus.Infof("Failed to get the dnsname plugin version: %v", err) } diff --git a/vendor/github.com/containers/common/libnetwork/netavark/config.go b/vendor/github.com/containers/common/libnetwork/netavark/config.go index 45b49bf2..de7af957 100644 --- a/vendor/github.com/containers/common/libnetwork/netavark/config.go +++ b/vendor/github.com/containers/common/libnetwork/netavark/config.go @@ -204,7 +204,10 @@ func (n *netavarkNetwork) networkCreate(newNetwork *types.Network, defaultNet bo } // rust only support "true" or "false" while go can parse 1 and 0 as well so we need to change it newNetwork.Options[types.NoDefaultRoute] = strconv.FormatBool(val) - + case types.VRFOption: + if len(value) == 0 { + return nil, errors.New("invalid vrf name") + } default: return nil, fmt.Errorf("unsupported bridge network option %s", key) } diff --git a/vendor/github.com/containers/common/libnetwork/netavark/network.go b/vendor/github.com/containers/common/libnetwork/netavark/network.go index 17bd863a..0d323db2 100644 --- a/vendor/github.com/containers/common/libnetwork/netavark/network.go +++ b/vendor/github.com/containers/common/libnetwork/netavark/network.go @@ -15,7 +15,7 @@ import ( "github.com/containers/common/libnetwork/internal/util" "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/config" - cutil "github.com/containers/common/pkg/util" + "github.com/containers/common/pkg/version" "github.com/containers/storage/pkg/lockfile" "github.com/containers/storage/pkg/unshare" "github.com/sirupsen/logrus" @@ -53,7 +53,7 @@ type netavarkNetwork struct { // ipamDBPath is the path to the ip allocation bolt db ipamDBPath string - // syslog describes whenever the netavark debbug output should be log to the syslog as well. + // syslog describes whenever the netavark debug output should be log to the syslog as well. // This will use logrus to do so, make sure logrus is set up to log to the syslog. syslog bool @@ -93,7 +93,7 @@ type InitConfig struct { // PluginDirs list of directories were netavark plugins are located PluginDirs []string - // Syslog describes whenever the netavark debbug output should be log to the syslog as well. + // Syslog describes whenever the netavark debug output should be log to the syslog as well. 
// This will use logrus to do so, make sure logrus is set up to log to the syslog. Syslog bool } @@ -341,8 +341,8 @@ func (n *netavarkNetwork) DefaultInterfaceName() string { // package version and program version. func (n *netavarkNetwork) NetworkInfo() types.NetworkInfo { path := n.netavarkBinary - packageVersion := cutil.PackageVersion(path) - programVersion, err := cutil.ProgramVersion(path) + packageVersion := version.Package(path) + programVersion, err := version.Program(path) if err != nil { logrus.Infof("Failed to get the netavark version: %v", err) } @@ -354,8 +354,8 @@ func (n *netavarkNetwork) NetworkInfo() types.NetworkInfo { } dnsPath := n.aardvarkBinary - dnsPackage := cutil.PackageVersion(dnsPath) - dnsProgram, err := cutil.ProgramVersion(dnsPath) + dnsPackage := version.Package(dnsPath) + dnsProgram, err := version.Program(dnsPath) if err != nil { logrus.Infof("Failed to get the aardvark version: %v", err) } diff --git a/vendor/github.com/containers/common/libnetwork/network/interface.go b/vendor/github.com/containers/common/libnetwork/network/interface.go index a717775a..aeac8d9c 100644 --- a/vendor/github.com/containers/common/libnetwork/network/interface.go +++ b/vendor/github.com/containers/common/libnetwork/network/interface.go @@ -81,7 +81,7 @@ func NetworkBackend(store storage.Store, conf *config.Config, syslog bool) (type NetworkRunDir: runDir, NetavarkBinary: netavarkBin, AardvarkBinary: aardvarkBin, - PluginDirs: conf.Network.NetavarkPluginDirs, + PluginDirs: conf.Network.NetavarkPluginDirs.Get(), DefaultNetwork: conf.Network.DefaultNetwork, DefaultSubnet: conf.Network.DefaultSubnet, DefaultsubnetPools: conf.Network.DefaultSubnetPools, @@ -181,7 +181,7 @@ func getCniInterface(conf *config.Config) (types.ContainerNetwork, error) { } return cni.NewCNINetworkInterface(&cni.InitConfig{ CNIConfigDir: confDir, - CNIPluginDirs: conf.Network.CNIPluginDirs, + CNIPluginDirs: conf.Network.CNIPluginDirs.Get(), RunDir: conf.Engine.TmpDir, DefaultNetwork: conf.Network.DefaultNetwork, DefaultSubnet: conf.Network.DefaultSubnet, diff --git a/vendor/github.com/containers/common/libnetwork/pasta/pasta.go b/vendor/github.com/containers/common/libnetwork/pasta/pasta.go index 6a53afe3..b787a781 100644 --- a/vendor/github.com/containers/common/libnetwork/pasta/pasta.go +++ b/vendor/github.com/containers/common/libnetwork/pasta/pasta.go @@ -84,7 +84,7 @@ func Setup(opts *SetupOptions) error { } // first append options set in the config - cmdArgs = append(cmdArgs, opts.Config.Network.PastaOptions...) + cmdArgs = append(cmdArgs, opts.Config.Network.PastaOptions.Get()...) // then append the ones that were set on the cli cmdArgs = append(cmdArgs, opts.ExtraOptions...) diff --git a/vendor/github.com/containers/common/libnetwork/slirp4netns/slirp4netns.go b/vendor/github.com/containers/common/libnetwork/slirp4netns/slirp4netns.go index 0e53a922..43ca9780 100644 --- a/vendor/github.com/containers/common/libnetwork/slirp4netns/slirp4netns.go +++ b/vendor/github.com/containers/common/libnetwork/slirp4netns/slirp4netns.go @@ -124,8 +124,8 @@ func checkSlirpFlags(path string) (*slirpFeatures, error) { } func parseNetworkOptions(config *config.Config, extraOptions []string) (*networkOptions, error) { - options := make([]string, 0, len(config.Engine.NetworkCmdOptions)+len(extraOptions)) - options = append(options, config.Engine.NetworkCmdOptions...) 
+ options := make([]string, 0, len(config.Engine.NetworkCmdOptions.Get())+len(extraOptions)) + options = append(options, config.Engine.NetworkCmdOptions.Get()...) options = append(options, extraOptions...) opts := &networkOptions{ // overwrite defaults diff --git a/vendor/github.com/containers/common/libnetwork/types/const.go b/vendor/github.com/containers/common/libnetwork/types/const.go index 83103ef6..a9161820 100644 --- a/vendor/github.com/containers/common/libnetwork/types/const.go +++ b/vendor/github.com/containers/common/libnetwork/types/const.go @@ -43,6 +43,7 @@ const ( MetricOption = "metric" NoDefaultRoute = "no_default_route" BclimOption = "bclim" + VRFOption = "vrf" ) type NetworkBackend string diff --git a/vendor/github.com/containers/common/libnetwork/util/filters.go b/vendor/github.com/containers/common/libnetwork/util/filters.go index 782c5d2b..70f90918 100644 --- a/vendor/github.com/containers/common/libnetwork/util/filters.go +++ b/vendor/github.com/containers/common/libnetwork/util/filters.go @@ -38,7 +38,7 @@ func createFilterFuncs(key string, filterValues []string) (types.FilterFunc, err case "id": // matches part of one id return func(net types.Network) bool { - return util.FilterID(net.ID, filterValues) + return filters.FilterID(net.ID, filterValues) }, nil // TODO: add dns enabled, internal filter @@ -67,7 +67,7 @@ func createPruneFilterFuncs(key string, filterValues []string) (types.FilterFunc }, nil case "label!": return func(net types.Network) bool { - return !filters.MatchLabelFilters(filterValues, net.Labels) + return filters.MatchNegatedLabelFilters(filterValues, net.Labels) }, nil case "until": until, err := filters.ComputeUntilTimestamp(filterValues) diff --git a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go index 7ba63ba7..435422c2 100644 --- a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go +++ b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go @@ -212,6 +212,11 @@ func parseAAParserVersion(output string) (int, error) { words := strings.Split(lines[0], " ") version := words[len(words)-1] + // trim "-beta1" suffix from version="3.0.0-beta1" if exists + version = strings.SplitN(version, "-", 2)[0] + // also trim "~..." 
suffix used historically (https://gitlab.com/apparmor/apparmor/-/commit/bca67d3d27d219d11ce8c9cc70612bd637f88c10) + version = strings.SplitN(version, "~", 2)[0] + // split by major minor version v := strings.Split(version, ".") if len(v) == 0 || len(v) > 3 { diff --git a/vendor/github.com/containers/common/pkg/auth/auth.go b/vendor/github.com/containers/common/pkg/auth/auth.go index a96ee8eb..6536d0f2 100644 --- a/vendor/github.com/containers/common/pkg/auth/auth.go +++ b/vendor/github.com/containers/common/pkg/auth/auth.go @@ -10,12 +10,13 @@ import ( "path/filepath" "strings" - "github.com/containers/common/pkg/util" + passwd "github.com/containers/common/pkg/password" "github.com/containers/image/v5/docker" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/pkg/docker/config" "github.com/containers/image/v5/pkg/sysregistriesv2" "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/homedir" "github.com/sirupsen/logrus" ) @@ -39,33 +40,46 @@ func (e ErrNewCredentialsInvalid) Unwrap() error { // GetDefaultAuthFile returns env value REGISTRY_AUTH_FILE as default // --authfile path used in multiple --authfile flag definitions // Will fail over to DOCKER_CONFIG if REGISTRY_AUTH_FILE environment is not set +// +// WARNINGS: +// - In almost all invocations, expect this function to return ""; so it can not be used +// for directly accessing the file. +// - Use this only for commands that _read_ credentials, not write them. +// The path may refer to github.com/containers auth.json, or to Docker config.json, +// and the distinction is lost; writing auth.json data to config.json may not be consumable by Docker, +// or it may overwrite and discard unrelated Docker configuration set by the user. func GetDefaultAuthFile() string { + // Keep this in sync with the default logic in systemContextWithOptions! + if authfile := os.Getenv("REGISTRY_AUTH_FILE"); authfile != "" { return authfile } + // This pre-existing behavior is not conceptually consistent: + // If users have a ~/.docker/config.json in the default path, and no environment variable + // set, we read auth.json first, falling back to config.json; + // but if DOCKER_CONFIG is set, we read only config.json in that path, and we don’t read auth.json at all. if authEnv := os.Getenv("DOCKER_CONFIG"); authEnv != "" { return filepath.Join(authEnv, "config.json") } return "" } -// CheckAuthFile validates filepath given by --authfile -// used by command has --authfile flag -func CheckAuthFile(authfile string) error { - if authfile == "" { +// CheckAuthFile validates a path option, failing if the option is set but the referenced file is not accessible. +func CheckAuthFile(pathOption string) error { + if pathOption == "" { return nil } - if _, err := os.Stat(authfile); err != nil { - return fmt.Errorf("checking authfile: %w", err) + if _, err := os.Stat(pathOption); err != nil { + return fmt.Errorf("credential file is not accessible: %w", err) } return nil } // systemContextWithOptions returns a version of sys -// updated with authFile and certDir values (if they are not ""). +// updated with authFile, dockerCompatAuthFile and certDir values (if they are not ""). // NOTE: this is a shallow copy that can be used and updated, but may share // data with the original parameter. 
-func systemContextWithOptions(sys *types.SystemContext, authFile, certDir string) *types.SystemContext { +func systemContextWithOptions(sys *types.SystemContext, authFile, dockerCompatAuthFile, certDir string) (*types.SystemContext, error) { if sys != nil { sysCopy := *sys sys = &sysCopy @@ -73,24 +87,50 @@ func systemContextWithOptions(sys *types.SystemContext, authFile, certDir string sys = &types.SystemContext{} } - if authFile != "" { + defaultDockerConfigPath := filepath.Join(homedir.Get(), ".docker", "config.json") + switch { + case authFile != "" && dockerCompatAuthFile != "": + return nil, errors.New("options for paths to the credential file and to the Docker-compatible credential file can not be set simultaneously") + case authFile != "": + if authFile == defaultDockerConfigPath { + logrus.Warn("saving credentials to ~/.docker/config.json, but not using Docker-compatible file format") + } sys.AuthFilePath = authFile + case dockerCompatAuthFile != "": + sys.DockerCompatAuthFilePath = dockerCompatAuthFile + default: + // Keep this in sync with GetDefaultAuthFile()! + // + // Note that c/image does not natively implement the REGISTRY_AUTH_FILE + // variable, so not all callers look for credentials in this location. + if authFileVar := os.Getenv("REGISTRY_AUTH_FILE"); authFileVar != "" { + if authFileVar == defaultDockerConfigPath { + logrus.Warn("$REGISTRY_AUTH_FILE points to ~/.docker/config.json, but the file format is not fully compatible; use the Docker-compatible file path option instead") + } + sys.AuthFilePath = authFileVar + } else if dockerConfig := os.Getenv("DOCKER_CONFIG"); dockerConfig != "" { + // This preserves pre-existing _inconsistent_ behavior: + // If the Docker configuration exists in the default ~/.docker/config.json location, + // we DO NOT write to it; instead, we update auth.json in the default path. + // Only if the user explicitly sets DOCKER_CONFIG, we write to that config.json. + sys.DockerCompatAuthFilePath = filepath.Join(dockerConfig, "config.json") + } } if certDir != "" { sys.DockerCertPath = certDir } - return sys + return sys, nil } // Login implements a “log in” command with the provided opts and args // reading the password from opts.Stdin or the options in opts. 
func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginOptions, args []string) error { - systemContext = systemContextWithOptions(systemContext, opts.AuthFile, opts.CertDir) + systemContext, err := systemContextWithOptions(systemContext, opts.AuthFile, opts.DockerCompatAuthFile, opts.CertDir) + if err != nil { + return err + } - var ( - key, registry string - err error - ) + var key, registry string switch len(args) { case 0: if !opts.AcceptUnspecifiedRegistry { @@ -269,7 +309,7 @@ func getUserAndPass(opts *LoginOptions, password, userFromAuthFile string) (user } if password == "" { fmt.Fprint(opts.Stdout, "Password: ") - pass, err := util.ReadPassword(int(os.Stdin.Fd())) + pass, err := passwd.Read(int(os.Stdin.Fd())) if err != nil { return "", "", fmt.Errorf("reading password: %w", err) } @@ -284,7 +324,13 @@ func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []stri if err := CheckAuthFile(opts.AuthFile); err != nil { return err } - systemContext = systemContextWithOptions(systemContext, opts.AuthFile, "") + if err := CheckAuthFile(opts.DockerCompatAuthFile); err != nil { + return err + } + systemContext, err := systemContextWithOptions(systemContext, opts.AuthFile, opts.DockerCompatAuthFile, "") + if err != nil { + return err + } if opts.All { if len(args) != 0 { @@ -297,10 +343,7 @@ func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []stri return nil } - var ( - key, registry string - err error - ) + var key, registry string switch len(args) { case 0: if !opts.AcceptUnspecifiedRegistry { diff --git a/vendor/github.com/containers/common/pkg/auth/cli.go b/vendor/github.com/containers/common/pkg/auth/cli.go index 26727f35..60e02e51 100644 --- a/vendor/github.com/containers/common/pkg/auth/cli.go +++ b/vendor/github.com/containers/common/pkg/auth/cli.go @@ -14,14 +14,15 @@ type LoginOptions struct { // CLI flags managed by the FlagSet returned by GetLoginFlags // Callers that use GetLoginFlags should not need to touch these values at all; callers that use // other CLI frameworks should set them based on user input. - AuthFile string - CertDir string - Password string - Username string - StdinPassword bool - GetLoginSet bool - Verbose bool // set to true for verbose output - AcceptRepositories bool // set to true to allow namespaces or repositories rather than just registries + AuthFile string + DockerCompatAuthFile string + CertDir string + Password string + Username string + StdinPassword bool + GetLoginSet bool + Verbose bool // set to true for verbose output + AcceptRepositories bool // set to true to allow namespaces or repositories rather than just registries // Options caller can set Stdin io.Reader // set to os.Stdin Stdout io.Writer // set to os.Stdout @@ -34,9 +35,10 @@ type LogoutOptions struct { // CLI flags managed by the FlagSet returned by GetLogoutFlags // Callers that use GetLogoutFlags should not need to touch these values at all; callers that use // other CLI frameworks should set them based on user input. 
- AuthFile string - All bool - AcceptRepositories bool // set to true to allow namespaces or repositories rather than just registries + AuthFile string + DockerCompatAuthFile string + All bool + AcceptRepositories bool // set to true to allow namespaces or repositories rather than just registries // Options caller can set Stdout io.Writer // set to os.Stdout AcceptUnspecifiedRegistry bool // set to true if allows logout with unspecified registry @@ -45,7 +47,8 @@ type LogoutOptions struct { // GetLoginFlags defines and returns login flags for containers tools func GetLoginFlags(flags *LoginOptions) *pflag.FlagSet { fs := pflag.FlagSet{} - fs.StringVar(&flags.AuthFile, "authfile", GetDefaultAuthFile(), "path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override") + fs.StringVar(&flags.AuthFile, "authfile", "", "path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override") + fs.StringVar(&flags.DockerCompatAuthFile, "compat-auth-file", "", "path of a Docker-compatible config file to update instead") fs.StringVar(&flags.CertDir, "cert-dir", "", "use certificates at the specified path to access the registry") fs.StringVarP(&flags.Password, "password", "p", "", "Password for registry") fs.StringVarP(&flags.Username, "username", "u", "", "Username for registry") @@ -59,6 +62,7 @@ func GetLoginFlags(flags *LoginOptions) *pflag.FlagSet { func GetLoginFlagsCompletions() completion.FlagCompletions { flagCompletion := completion.FlagCompletions{} flagCompletion["authfile"] = completion.AutocompleteDefault + flagCompletion["compat-auth-file"] = completion.AutocompleteDefault flagCompletion["cert-dir"] = completion.AutocompleteDefault flagCompletion["password"] = completion.AutocompleteNone flagCompletion["username"] = completion.AutocompleteNone @@ -68,7 +72,8 @@ func GetLoginFlagsCompletions() completion.FlagCompletions { // GetLogoutFlags defines and returns logout flags for containers tools func GetLogoutFlags(flags *LogoutOptions) *pflag.FlagSet { fs := pflag.FlagSet{} - fs.StringVar(&flags.AuthFile, "authfile", GetDefaultAuthFile(), "path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override") + fs.StringVar(&flags.AuthFile, "authfile", "", "path of the authentication file. 
Use REGISTRY_AUTH_FILE environment variable to override") + fs.StringVar(&flags.DockerCompatAuthFile, "compat-auth-file", "", "path of a Docker-compatible config file to update instead") fs.BoolVarP(&flags.All, "all", "a", false, "Remove the cached credentials for all registries in the auth file") return &fs } @@ -77,5 +82,6 @@ func GetLogoutFlags(flags *LogoutOptions) *pflag.FlagSet { func GetLogoutFlagsCompletions() completion.FlagCompletions { flagCompletion := completion.FlagCompletions{} flagCompletion["authfile"] = completion.AutocompleteDefault + flagCompletion["compat-auth-file"] = completion.AutocompleteDefault return flagCompletion } diff --git a/vendor/github.com/containers/common/pkg/cgroups/blkio.go b/vendor/github.com/containers/common/pkg/cgroups/blkio.go deleted file mode 100644 index e157e6fa..00000000 --- a/vendor/github.com/containers/common/pkg/cgroups/blkio.go +++ /dev/null @@ -1,151 +0,0 @@ -//go:build !linux -// +build !linux - -package cgroups - -import ( - "bufio" - "errors" - "fmt" - "os" - "path/filepath" - "strconv" - "strings" - - spec "github.com/opencontainers/runtime-spec/specs-go" -) - -type blkioHandler struct{} - -func getBlkioHandler() *blkioHandler { - return &blkioHandler{} -} - -// Apply set the specified constraints -func (c *blkioHandler) Apply(ctr *CgroupControl, res *spec.LinuxResources) error { - if res.BlockIO == nil { - return nil - } - return fmt.Errorf("blkio apply function not implemented yet") -} - -// Create the cgroup -func (c *blkioHandler) Create(ctr *CgroupControl) (bool, error) { - if ctr.cgroup2 { - return false, nil - } - return ctr.createCgroupDirectory(Blkio) -} - -// Destroy the cgroup -func (c *blkioHandler) Destroy(ctr *CgroupControl) error { - return rmDirRecursively(ctr.getCgroupv1Path(Blkio)) -} - -// Stat fills a metrics structure with usage stats for the controller -func (c *blkioHandler) Stat(ctr *CgroupControl, m *Metrics) error { - var ioServiceBytesRecursive []BlkIOEntry - - if ctr.cgroup2 { - // more details on the io.stat file format:X https://facebookmicrosites.github.io/cgroup2/docs/io-controller.html - values, err := readCgroup2MapFile(ctr, "io.stat") - if err != nil { - return err - } - for k, v := range values { - d := strings.Split(k, ":") - if len(d) != 2 { - continue - } - minor, err := strconv.ParseUint(d[0], 10, 0) - if err != nil { - return err - } - major, err := strconv.ParseUint(d[1], 10, 0) - if err != nil { - return err - } - - for _, item := range v { - d := strings.Split(item, "=") - if len(d) != 2 { - continue - } - op := d[0] - - // Accommodate the cgroup v1 naming - switch op { - case "rbytes": - op = "read" - case "wbytes": - op = "write" - } - - value, err := strconv.ParseUint(d[1], 10, 0) - if err != nil { - return err - } - - entry := BlkIOEntry{ - Op: op, - Major: major, - Minor: minor, - Value: value, - } - ioServiceBytesRecursive = append(ioServiceBytesRecursive, entry) - } - } - } else { - BlkioRoot := ctr.getCgroupv1Path(Blkio) - - p := filepath.Join(BlkioRoot, "blkio.throttle.io_service_bytes_recursive") - f, err := os.Open(p) - if err != nil { - if errors.Is(err, os.ErrNotExist) { - return nil - } - return fmt.Errorf("open %s: %w", p, err) - } - defer f.Close() - - scanner := bufio.NewScanner(f) - for scanner.Scan() { - line := scanner.Text() - parts := strings.Fields(line) - if len(parts) < 3 { - continue - } - d := strings.Split(parts[0], ":") - if len(d) != 2 { - continue - } - minor, err := strconv.ParseUint(d[0], 10, 0) - if err != nil { - return err - } - major, err := 
strconv.ParseUint(d[1], 10, 0) - if err != nil { - return err - } - - op := parts[1] - - value, err := strconv.ParseUint(parts[2], 10, 0) - if err != nil { - return err - } - entry := BlkIOEntry{ - Op: op, - Major: major, - Minor: minor, - Value: value, - } - ioServiceBytesRecursive = append(ioServiceBytesRecursive, entry) - } - if err := scanner.Err(); err != nil { - return fmt.Errorf("parse %s: %w", p, err) - } - } - m.Blkio = BlkioMetrics{IoServiceBytesRecursive: ioServiceBytesRecursive} - return nil -} diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups.go deleted file mode 100644 index 10b70b8f..00000000 --- a/vendor/github.com/containers/common/pkg/cgroups/cgroups.go +++ /dev/null @@ -1,614 +0,0 @@ -//go:build !linux -// +build !linux - -package cgroups - -import ( - "bufio" - "context" - "errors" - "fmt" - "math" - "os" - "path/filepath" - "strconv" - "strings" - - "github.com/containers/storage/pkg/unshare" - systemdDbus "github.com/coreos/go-systemd/v22/dbus" - "github.com/godbus/dbus/v5" - spec "github.com/opencontainers/runtime-spec/specs-go" - "github.com/sirupsen/logrus" -) - -var ( - // ErrCgroupDeleted means the cgroup was deleted - ErrCgroupDeleted = errors.New("cgroup deleted") - // ErrCgroupV1Rootless means the cgroup v1 were attempted to be used in rootless environment - ErrCgroupV1Rootless = errors.New("no support for CGroups V1 in rootless environments") - ErrStatCgroup = errors.New("no cgroup available for gathering user statistics") -) - -// CgroupControl controls a cgroup hierarchy -type CgroupControl struct { - cgroup2 bool - path string - systemd bool - // List of additional cgroup subsystems joined that - // do not have a custom handler. - additionalControllers []controller -} - -// CPUUsage keeps stats for the CPU usage (unit: nanoseconds) -type CPUUsage struct { - Kernel uint64 - Total uint64 - PerCPU []uint64 -} - -// MemoryUsage keeps stats for the memory usage -type MemoryUsage struct { - Usage uint64 - Limit uint64 -} - -// CPUMetrics keeps stats for the CPU usage -type CPUMetrics struct { - Usage CPUUsage -} - -// BlkIOEntry describes an entry in the blkio stats -type BlkIOEntry struct { - Op string - Major uint64 - Minor uint64 - Value uint64 -} - -// BlkioMetrics keeps usage stats for the blkio cgroup controller -type BlkioMetrics struct { - IoServiceBytesRecursive []BlkIOEntry -} - -// MemoryMetrics keeps usage stats for the memory cgroup controller -type MemoryMetrics struct { - Usage MemoryUsage -} - -// PidsMetrics keeps usage stats for the pids cgroup controller -type PidsMetrics struct { - Current uint64 -} - -// Metrics keeps usage stats for the cgroup controllers -type Metrics struct { - CPU CPUMetrics - Blkio BlkioMetrics - Memory MemoryMetrics - Pids PidsMetrics -} - -type controller struct { - name string - symlink bool -} - -type controllerHandler interface { - Create(*CgroupControl) (bool, error) - Apply(*CgroupControl, *spec.LinuxResources) error - Destroy(*CgroupControl) error - Stat(*CgroupControl, *Metrics) error -} - -const ( - cgroupRoot = "/sys/fs/cgroup" - // CPU is the cpu controller - CPU = "cpu" - // CPUAcct is the cpuacct controller - CPUAcct = "cpuacct" - // CPUset is the cpuset controller - CPUset = "cpuset" - // Memory is the memory controller - Memory = "memory" - // Pids is the pids controller - Pids = "pids" - // Blkio is the blkio controller - Blkio = "blkio" -) - -var handlers map[string]controllerHandler - -func init() { - handlers = 
make(map[string]controllerHandler) - handlers[CPU] = getCPUHandler() - handlers[CPUset] = getCpusetHandler() - handlers[Memory] = getMemoryHandler() - handlers[Pids] = getPidsHandler() - handlers[Blkio] = getBlkioHandler() -} - -// getAvailableControllers get the available controllers -func getAvailableControllers(exclude map[string]controllerHandler, cgroup2 bool) ([]controller, error) { - if cgroup2 { - controllers := []controller{} - controllersFile := cgroupRoot + "/cgroup.controllers" - // rootless cgroupv2: check available controllers for current user, systemd or servicescope will inherit - if unshare.IsRootless() { - userSlice, err := getCgroupPathForCurrentProcess() - if err != nil { - return controllers, err - } - // userSlice already contains '/' so not adding here - basePath := cgroupRoot + userSlice - controllersFile = fmt.Sprintf("%s/cgroup.controllers", basePath) - } - controllersFileBytes, err := os.ReadFile(controllersFile) - if err != nil { - return nil, fmt.Errorf("failed while reading controllers for cgroup v2: %w", err) - } - for _, controllerName := range strings.Fields(string(controllersFileBytes)) { - c := controller{ - name: controllerName, - symlink: false, - } - controllers = append(controllers, c) - } - return controllers, nil - } - - subsystems, _ := cgroupV1GetAllSubsystems() - controllers := []controller{} - // cgroupv1 and rootless: No subsystem is available: delegation is unsafe. - if unshare.IsRootless() { - return controllers, nil - } - - for _, name := range subsystems { - if _, found := exclude[name]; found { - continue - } - fileInfo, err := os.Stat(cgroupRoot + "/" + name) - if err != nil { - continue - } - c := controller{ - name: name, - symlink: !fileInfo.IsDir(), - } - controllers = append(controllers, c) - } - - return controllers, nil -} - -// GetAvailableControllers get string:bool map of all the available controllers -func GetAvailableControllers(exclude map[string]controllerHandler, cgroup2 bool) ([]string, error) { - availableControllers, err := getAvailableControllers(exclude, cgroup2) - if err != nil { - return nil, err - } - controllerList := []string{} - for _, controller := range availableControllers { - controllerList = append(controllerList, controller.name) - } - - return controllerList, nil -} - -func cgroupV1GetAllSubsystems() ([]string, error) { - f, err := os.Open("/proc/cgroups") - if err != nil { - return nil, err - } - defer f.Close() - - subsystems := []string{} - - s := bufio.NewScanner(f) - for s.Scan() { - text := s.Text() - if text[0] != '#' { - parts := strings.Fields(text) - if len(parts) >= 4 && parts[3] != "0" { - subsystems = append(subsystems, parts[0]) - } - } - } - if err := s.Err(); err != nil { - return nil, err - } - return subsystems, nil -} - -func getCgroupPathForCurrentProcess() (string, error) { - path := fmt.Sprintf("/proc/%d/cgroup", os.Getpid()) - f, err := os.Open(path) - if err != nil { - return "", err - } - defer f.Close() - - cgroupPath := "" - s := bufio.NewScanner(f) - for s.Scan() { - text := s.Text() - procEntries := strings.SplitN(text, "::", 2) - // set process cgroupPath only if entry is valid - if len(procEntries) > 1 { - cgroupPath = procEntries[1] - } - } - if err := s.Err(); err != nil { - return cgroupPath, err - } - return cgroupPath, nil -} - -// getCgroupv1Path is a helper function to get the cgroup v1 path -func (c *CgroupControl) getCgroupv1Path(name string) string { - return filepath.Join(cgroupRoot, name, c.path) -} - -// initialize initializes the specified hierarchy -func (c 
*CgroupControl) initialize() (err error) { - createdSoFar := map[string]controllerHandler{} - defer func() { - if err != nil { - for name, ctr := range createdSoFar { - if err := ctr.Destroy(c); err != nil { - logrus.Warningf("error cleaning up controller %s for %s", name, c.path) - } - } - } - }() - if c.cgroup2 { - if err := createCgroupv2Path(filepath.Join(cgroupRoot, c.path)); err != nil { - return fmt.Errorf("creating cgroup path %s: %w", c.path, err) - } - } - for name, handler := range handlers { - created, err := handler.Create(c) - if err != nil { - return err - } - if created { - createdSoFar[name] = handler - } - } - - if !c.cgroup2 { - // We won't need to do this for cgroup v2 - for _, ctr := range c.additionalControllers { - if ctr.symlink { - continue - } - path := c.getCgroupv1Path(ctr.name) - if err := os.MkdirAll(path, 0o755); err != nil { - return fmt.Errorf("creating cgroup path for %s: %w", ctr.name, err) - } - } - } - - return nil -} - -func readFileAsUint64(path string) (uint64, error) { - data, err := os.ReadFile(path) - if err != nil { - return 0, err - } - v := cleanString(string(data)) - if v == "max" { - return math.MaxUint64, nil - } - ret, err := strconv.ParseUint(v, 10, 64) - if err != nil { - return ret, fmt.Errorf("parse %s from %s: %w", v, path, err) - } - return ret, nil -} - -func readFileByKeyAsUint64(path, key string) (uint64, error) { - content, err := os.ReadFile(path) - if err != nil { - return 0, err - } - for _, line := range strings.Split(string(content), "\n") { - fields := strings.SplitN(line, " ", 2) - if fields[0] == key { - v := cleanString(string(fields[1])) - if v == "max" { - return math.MaxUint64, nil - } - ret, err := strconv.ParseUint(v, 10, 64) - if err != nil { - return ret, fmt.Errorf("parse %s from %s: %w", v, path, err) - } - return ret, nil - } - } - - return 0, fmt.Errorf("no key named %s from %s", key, path) -} - -// New creates a new cgroup control -func New(path string, resources *spec.LinuxResources) (*CgroupControl, error) { - cgroup2, err := IsCgroup2UnifiedMode() - if err != nil { - return nil, err - } - control := &CgroupControl{ - cgroup2: cgroup2, - path: path, - } - - if !cgroup2 { - controllers, err := getAvailableControllers(handlers, false) - if err != nil { - return nil, err - } - control.additionalControllers = controllers - } - - if err := control.initialize(); err != nil { - return nil, err - } - - return control, nil -} - -// NewSystemd creates a new cgroup control -func NewSystemd(path string) (*CgroupControl, error) { - cgroup2, err := IsCgroup2UnifiedMode() - if err != nil { - return nil, err - } - control := &CgroupControl{ - cgroup2: cgroup2, - path: path, - systemd: true, - } - return control, nil -} - -// Load loads an existing cgroup control -func Load(path string) (*CgroupControl, error) { - cgroup2, err := IsCgroup2UnifiedMode() - if err != nil { - return nil, err - } - control := &CgroupControl{ - cgroup2: cgroup2, - path: path, - systemd: false, - } - if !cgroup2 { - controllers, err := getAvailableControllers(handlers, false) - if err != nil { - return nil, err - } - control.additionalControllers = controllers - } - if !cgroup2 { - oneExists := false - // check that the cgroup exists at least under one controller - for name := range handlers { - p := control.getCgroupv1Path(name) - if _, err := os.Stat(p); err == nil { - oneExists = true - break - } - } - - // if there is no controller at all, raise an error - if !oneExists { - if unshare.IsRootless() { - return nil, ErrCgroupV1Rootless - } - // 
compatible with the error code - // used by containerd/cgroups - return nil, ErrCgroupDeleted - } - } - return control, nil -} - -// CreateSystemdUnit creates the systemd cgroup -func (c *CgroupControl) CreateSystemdUnit(path string) error { - if !c.systemd { - return fmt.Errorf("the cgroup controller is not using systemd") - } - - conn, err := systemdDbus.NewWithContext(context.TODO()) - if err != nil { - return err - } - defer conn.Close() - - return systemdCreate(path, conn) -} - -// GetUserConnection returns a user connection to D-BUS -func GetUserConnection(uid int) (*systemdDbus.Conn, error) { - return systemdDbus.NewConnection(func() (*dbus.Conn, error) { - return dbusAuthConnection(uid, dbus.SessionBusPrivateNoAutoStartup) - }) -} - -// CreateSystemdUserUnit creates the systemd cgroup for the specified user -func (c *CgroupControl) CreateSystemdUserUnit(path string, uid int) error { - if !c.systemd { - return fmt.Errorf("the cgroup controller is not using systemd") - } - - conn, err := GetUserConnection(uid) - if err != nil { - return err - } - defer conn.Close() - - return systemdCreate(path, conn) -} - -func dbusAuthConnection(uid int, createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) { - conn, err := createBus() - if err != nil { - return nil, err - } - - methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(uid))} - - err = conn.Auth(methods) - if err != nil { - conn.Close() - return nil, err - } - if err := conn.Hello(); err != nil { - return nil, err - } - - return conn, nil -} - -// Delete cleans a cgroup -func (c *CgroupControl) Delete() error { - return c.DeleteByPath(c.path) -} - -// DeleteByPathConn deletes the specified cgroup path using the specified -// dbus connection if needed. -func (c *CgroupControl) DeleteByPathConn(path string, conn *systemdDbus.Conn) error { - if c.systemd { - return systemdDestroyConn(path, conn) - } - if c.cgroup2 { - return rmDirRecursively(filepath.Join(cgroupRoot, c.path)) - } - var lastError error - for _, h := range handlers { - if err := h.Destroy(c); err != nil { - lastError = err - } - } - - for _, ctr := range c.additionalControllers { - if ctr.symlink { - continue - } - p := c.getCgroupv1Path(ctr.name) - if err := rmDirRecursively(p); err != nil { - lastError = fmt.Errorf("remove %s: %w", p, err) - } - } - return lastError -} - -// DeleteByPath deletes the specified cgroup path -func (c *CgroupControl) DeleteByPath(path string) error { - if c.systemd { - conn, err := systemdDbus.NewWithContext(context.TODO()) - if err != nil { - return err - } - defer conn.Close() - return c.DeleteByPathConn(path, conn) - } - return c.DeleteByPathConn(path, nil) -} - -// Update updates the cgroups -func (c *CgroupControl) Update(resources *spec.LinuxResources) error { - for _, h := range handlers { - if err := h.Apply(c, resources); err != nil { - return err - } - } - return nil -} - -// AddPid moves the specified pid to the cgroup -func (c *CgroupControl) AddPid(pid int) error { - pidString := []byte(fmt.Sprintf("%d\n", pid)) - - if c.cgroup2 { - p := filepath.Join(cgroupRoot, c.path, "cgroup.procs") - if err := os.WriteFile(p, pidString, 0o644); err != nil { - return fmt.Errorf("write %s: %w", p, err) - } - return nil - } - - names := make([]string, 0, len(handlers)) - for n := range handlers { - names = append(names, n) - } - - for _, c := range c.additionalControllers { - if !c.symlink { - names = append(names, c.name) - } - } - - for _, n := range names { - // If we aren't using cgroup2, we won't write correctly 
to unified hierarchy - if !c.cgroup2 && n == "unified" { - continue - } - p := filepath.Join(c.getCgroupv1Path(n), "tasks") - if err := os.WriteFile(p, pidString, 0o644); err != nil { - return fmt.Errorf("write %s: %w", p, err) - } - } - return nil -} - -// Stat returns usage statistics for the cgroup -func (c *CgroupControl) Stat() (*Metrics, error) { - m := Metrics{} - found := false - for _, h := range handlers { - if err := h.Stat(c, &m); err != nil { - if !errors.Is(err, os.ErrNotExist) { - return nil, err - } - logrus.Warningf("Failed to retrieve cgroup stats: %v", err) - continue - } - found = true - } - if !found { - return nil, ErrStatCgroup - } - return &m, nil -} - -func readCgroupMapPath(path string) (map[string][]string, error) { - ret := map[string][]string{} - f, err := os.Open(path) - if err != nil { - if errors.Is(err, os.ErrNotExist) { - return ret, nil - } - return nil, fmt.Errorf("open file %s: %w", path, err) - } - defer f.Close() - scanner := bufio.NewScanner(f) - for scanner.Scan() { - line := scanner.Text() - parts := strings.Fields(line) - if len(parts) < 2 { - continue - } - ret[parts[0]] = parts[1:] - } - if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("parsing file %s: %w", path, err) - } - return ret, nil -} - -func readCgroup2MapFile(ctr *CgroupControl, name string) (map[string][]string, error) { - p := filepath.Join(cgroupRoot, ctr.path, name) - - return readCgroupMapPath(p) -} diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go index e778b0e8..7605b500 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go +++ b/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go @@ -5,6 +5,7 @@ package cgroups import ( "bufio" + "bytes" "context" "errors" "fmt" @@ -134,8 +135,8 @@ func getAvailableControllers(exclude map[string]controllerHandler, cgroup2 bool) return controllers, nil } -// GetAvailableControllers get string:bool map of all the available controllers -func GetAvailableControllers(exclude map[string]controllerHandler, cgroup2 bool) ([]string, error) { +// AvailableControllers get string:bool map of all the available controllers +func AvailableControllers(exclude map[string]controllerHandler, cgroup2 bool) ([]string, error) { availableControllers, err := getAvailableControllers(exclude, cgroup2) if err != nil { return nil, err @@ -391,20 +392,13 @@ func (c *CgroupControl) CreateSystemdUnit(path string) error { return systemdCreate(c.config.Resources, path, conn) } -// GetUserConnection returns an user connection to D-BUS -func GetUserConnection(uid int) (*systemdDbus.Conn, error) { - return systemdDbus.NewConnection(func() (*dbus.Conn, error) { - return dbusAuthConnection(uid, dbus.SessionBusPrivateNoAutoStartup) - }) -} - // CreateSystemdUserUnit creates the systemd cgroup for the specified user func (c *CgroupControl) CreateSystemdUserUnit(path string, uid int) error { if !c.systemd { return fmt.Errorf("the cgroup controller is not using systemd") } - conn, err := GetUserConnection(uid) + conn, err := UserConnection(uid) if err != nil { return err } @@ -572,3 +566,170 @@ func readCgroup2MapFile(ctr *CgroupControl, name string) (map[string][]string, e return readCgroupMapPath(p) } + +func (c *CgroupControl) createCgroupDirectory(controller string) (bool, error) { + cPath := c.getCgroupv1Path(controller) + _, err := os.Stat(cPath) + if err == nil { + return false, nil + } + + if !errors.Is(err, os.ErrNotExist) { + 
return false, err + } + + if err := os.MkdirAll(cPath, 0o755); err != nil { + return false, fmt.Errorf("creating cgroup for %s: %w", controller, err) + } + return true, nil +} + +var TestMode bool + +func createCgroupv2Path(path string) (deferredError error) { + if !strings.HasPrefix(path, cgroupRoot+"/") { + return fmt.Errorf("invalid cgroup path %s", path) + } + content, err := os.ReadFile(cgroupRoot + "/cgroup.controllers") + if err != nil { + return err + } + ctrs := bytes.Fields(content) + res := append([]byte("+"), bytes.Join(ctrs, []byte(" +"))...) + + current := "/sys/fs" + elements := strings.Split(path, "/") + for i, e := range elements[3:] { + current = filepath.Join(current, e) + if i > 0 { + if err := os.Mkdir(current, 0o755); err != nil { + if !os.IsExist(err) { + return err + } + } else { + // If the directory was created, be sure it is not left around on errors. + defer func() { + if deferredError != nil { + os.Remove(current) + } + }() + } + } + // We enable the controllers for all the path components except the last one. It is not allowed to add + // PIDs if there are already enabled controllers. + if i < len(elements[3:])-1 { + if err := os.WriteFile(filepath.Join(current, "cgroup.subtree_control"), res, 0o755); err != nil { + return err + } + } + } + return nil +} + +func cleanString(s string) string { + return strings.Trim(s, "\n") +} + +func readAcct(ctr *CgroupControl, name string) (uint64, error) { + p := filepath.Join(ctr.getCgroupv1Path(CPUAcct), name) + return readFileAsUint64(p) +} + +func readAcctList(ctr *CgroupControl, name string) ([]uint64, error) { + p := filepath.Join(ctr.getCgroupv1Path(CPUAcct), name) + data, err := os.ReadFile(p) + if err != nil { + return nil, err + } + r := []uint64{} + for _, s := range strings.Split(string(data), " ") { + s = cleanString(s) + if s == "" { + break + } + v, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, fmt.Errorf("parsing %s: %w", s, err) + } + r = append(r, v) + } + return r, nil +} + +func cpusetCopyFromParent(path string, cgroupv2 bool) error { + for _, file := range []string{"cpuset.cpus", "cpuset.mems"} { + if _, err := cpusetCopyFileFromParent(path, file, cgroupv2); err != nil { + return err + } + } + return nil +} + +func cpusetCopyFileFromParent(dir, file string, cgroupv2 bool) ([]byte, error) { + if dir == cgroupRoot { + return nil, fmt.Errorf("could not find parent to initialize cpuset %s", file) + } + path := filepath.Join(dir, file) + parentPath := path + if cgroupv2 { + parentPath = fmt.Sprintf("%s.effective", parentPath) + } + data, err := os.ReadFile(parentPath) + if err != nil { + // if the file doesn't exist, it is likely that the cpuset controller + // is not enabled in the kernel. 
+ if os.IsNotExist(err) { + return nil, nil + } + return nil, err + } + if strings.Trim(string(data), "\n") != "" { + return data, nil + } + data, err = cpusetCopyFileFromParent(filepath.Dir(dir), file, cgroupv2) + if err != nil { + return nil, err + } + if err := os.WriteFile(path, data, 0o644); err != nil { + return nil, fmt.Errorf("write %s: %w", path, err) + } + return data, nil +} + +// SystemCPUUsage returns the system usage for all the cgroups +func SystemCPUUsage() (uint64, error) { + cgroupv2, err := IsCgroup2UnifiedMode() + if err != nil { + return 0, err + } + if !cgroupv2 { + p := filepath.Join(cgroupRoot, CPUAcct, "cpuacct.usage") + return readFileAsUint64(p) + } + + files, err := os.ReadDir(cgroupRoot) + if err != nil { + return 0, err + } + var total uint64 + for _, file := range files { + if !file.IsDir() { + continue + } + p := filepath.Join(cgroupRoot, file.Name(), "cpu.stat") + + values, err := readCgroupMapPath(p) + if err != nil { + return 0, err + } + + if val, found := values["usage_usec"]; found { + v, err := strconv.ParseUint(cleanString(val[0]), 10, 64) + if err != nil { + return 0, err + } + total += v * 1000 + } + } + return total, nil +} diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go index 3a861223..5c0cac64 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go +++ b/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go @@ -15,6 +15,8 @@ import ( "syscall" "time" + systemdDbus "github.com/coreos/go-systemd/v22/dbus" + "github.com/godbus/dbus/v5" "golang.org/x/sys/unix" ) @@ -37,6 +39,13 @@ func IsCgroup2UnifiedMode() (bool, error) { return isUnified, isUnifiedErr } +// UserConnection returns an user connection to D-BUS +func UserConnection(uid int) (*systemdDbus.Conn, error) { + return systemdDbus.NewConnection(func() (*dbus.Conn, error) { + return dbusAuthConnection(uid, dbus.SessionBusPrivateNoAutoStartup) + }) +} + // UserOwnsCurrentSystemdCgroup checks whether the current EUID owns the // current cgroup. func UserOwnsCurrentSystemdCgroup() (bool, error) { diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups_unsupported.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups_unsupported.go index b3dcb2d3..f2558728 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/cgroups_unsupported.go +++ b/vendor/github.com/containers/common/pkg/cgroups/cgroups_unsupported.go @@ -4,7 +4,10 @@ package cgroups import ( + "fmt" "os" + + systemdDbus "github.com/coreos/go-systemd/v22/dbus" ) // IsCgroup2UnifiedMode returns whether we are running in cgroup 2 cgroup2 mode. 
@@ -21,3 +24,8 @@ func UserOwnsCurrentSystemdCgroup() (bool, error) { func rmDirRecursively(path string) error { return os.RemoveAll(path) } + +// UserConnection returns an user connection to D-BUS +func UserConnection(uid int) (*systemdDbus.Conn, error) { + return nil, fmt.Errorf("systemd d-bus is not supported on this platform") +} diff --git a/vendor/github.com/containers/common/pkg/cgroups/cpu.go b/vendor/github.com/containers/common/pkg/cgroups/cpu.go deleted file mode 100644 index 16293e74..00000000 --- a/vendor/github.com/containers/common/pkg/cgroups/cpu.go +++ /dev/null @@ -1,91 +0,0 @@ -//go:build !linux -// +build !linux - -package cgroups - -import ( - "errors" - "fmt" - "os" - "strconv" - - spec "github.com/opencontainers/runtime-spec/specs-go" -) - -type cpuHandler struct{} - -func getCPUHandler() *cpuHandler { - return &cpuHandler{} -} - -// Apply set the specified constraints -func (c *cpuHandler) Apply(ctr *CgroupControl, res *spec.LinuxResources) error { - if res.CPU == nil { - return nil - } - return fmt.Errorf("cpu apply not implemented yet") -} - -// Create the cgroup -func (c *cpuHandler) Create(ctr *CgroupControl) (bool, error) { - if ctr.cgroup2 { - return false, nil - } - return ctr.createCgroupDirectory(CPU) -} - -// Destroy the cgroup -func (c *cpuHandler) Destroy(ctr *CgroupControl) error { - return rmDirRecursively(ctr.getCgroupv1Path(CPU)) -} - -// Stat fills a metrics structure with usage stats for the controller -func (c *cpuHandler) Stat(ctr *CgroupControl, m *Metrics) error { - var err error - usage := CPUUsage{} - if ctr.cgroup2 { - values, err := readCgroup2MapFile(ctr, "cpu.stat") - if err != nil { - return err - } - if val, found := values["usage_usec"]; found { - usage.Total, err = strconv.ParseUint(cleanString(val[0]), 10, 64) - if err != nil { - return err - } - usage.Kernel *= 1000 - } - if val, found := values["system_usec"]; found { - usage.Kernel, err = strconv.ParseUint(cleanString(val[0]), 10, 64) - if err != nil { - return err - } - usage.Total *= 1000 - } - // FIXME: How to read usage.PerCPU? 
- } else { - usage.Total, err = readAcct(ctr, "cpuacct.usage") - if err != nil { - if !errors.Is(err, os.ErrNotExist) { - return err - } - usage.Total = 0 - } - usage.Kernel, err = readAcct(ctr, "cpuacct.usage_sys") - if err != nil { - if !errors.Is(err, os.ErrNotExist) { - return err - } - usage.Kernel = 0 - } - usage.PerCPU, err = readAcctList(ctr, "cpuacct.usage_percpu") - if err != nil { - if !errors.Is(err, os.ErrNotExist) { - return err - } - usage.PerCPU = nil - } - } - m.CPU = CPUMetrics{Usage: usage} - return nil -} diff --git a/vendor/github.com/containers/common/pkg/cgroups/cpuset.go b/vendor/github.com/containers/common/pkg/cgroups/cpuset.go deleted file mode 100644 index f7ec9a33..00000000 --- a/vendor/github.com/containers/common/pkg/cgroups/cpuset.go +++ /dev/null @@ -1,49 +0,0 @@ -//go:build !linux -// +build !linux - -package cgroups - -import ( - "fmt" - "path/filepath" - - spec "github.com/opencontainers/runtime-spec/specs-go" -) - -type cpusetHandler struct{} - -func getCpusetHandler() *cpusetHandler { - return &cpusetHandler{} -} - -// Apply set the specified constraints -func (c *cpusetHandler) Apply(ctr *CgroupControl, res *spec.LinuxResources) error { - if res.CPU == nil { - return nil - } - return fmt.Errorf("cpuset apply not implemented yet") -} - -// Create the cgroup -func (c *cpusetHandler) Create(ctr *CgroupControl) (bool, error) { - if ctr.cgroup2 { - path := filepath.Join(cgroupRoot, ctr.path) - return true, cpusetCopyFromParent(path, true) - } - - created, err := ctr.createCgroupDirectory(CPUset) - if !created || err != nil { - return created, err - } - return true, cpusetCopyFromParent(ctr.getCgroupv1Path(CPUset), false) -} - -// Destroy the cgroup -func (c *cpusetHandler) Destroy(ctr *CgroupControl) error { - return rmDirRecursively(ctr.getCgroupv1Path(CPUset)) -} - -// Stat fills a metrics structure with usage stats for the controller -func (c *cpusetHandler) Stat(ctr *CgroupControl, m *Metrics) error { - return nil -} diff --git a/vendor/github.com/containers/common/pkg/cgroups/memory.go b/vendor/github.com/containers/common/pkg/cgroups/memory.go deleted file mode 100644 index b597b85b..00000000 --- a/vendor/github.com/containers/common/pkg/cgroups/memory.go +++ /dev/null @@ -1,69 +0,0 @@ -//go:build !linux -// +build !linux - -package cgroups - -import ( - "fmt" - "path/filepath" - - spec "github.com/opencontainers/runtime-spec/specs-go" -) - -type memHandler struct{} - -func getMemoryHandler() *memHandler { - return &memHandler{} -} - -// Apply set the specified constraints -func (c *memHandler) Apply(ctr *CgroupControl, res *spec.LinuxResources) error { - if res.Memory == nil { - return nil - } - return fmt.Errorf("memory apply not implemented yet") -} - -// Create the cgroup -func (c *memHandler) Create(ctr *CgroupControl) (bool, error) { - if ctr.cgroup2 { - return false, nil - } - return ctr.createCgroupDirectory(Memory) -} - -// Destroy the cgroup -func (c *memHandler) Destroy(ctr *CgroupControl) error { - return rmDirRecursively(ctr.getCgroupv1Path(Memory)) -} - -// Stat fills a metrics structure with usage stats for the controller -func (c *memHandler) Stat(ctr *CgroupControl, m *Metrics) error { - var err error - usage := MemoryUsage{} - - var memoryRoot string - var limitFilename string - - if ctr.cgroup2 { - memoryRoot = filepath.Join(cgroupRoot, ctr.path) - limitFilename = "memory.max" - if usage.Usage, err = readFileByKeyAsUint64(filepath.Join(memoryRoot, "memory.stat"), "anon"); err != nil { - return err - } - } else { - memoryRoot = 
ctr.getCgroupv1Path(Memory) - limitFilename = "memory.limit_in_bytes" - if usage.Usage, err = readFileAsUint64(filepath.Join(memoryRoot, "memory.usage_in_bytes")); err != nil { - return err - } - } - - usage.Limit, err = readFileAsUint64(filepath.Join(memoryRoot, limitFilename)) - if err != nil { - return err - } - - m.Memory = MemoryMetrics{Usage: usage} - return nil -} diff --git a/vendor/github.com/containers/common/pkg/cgroups/pids.go b/vendor/github.com/containers/common/pkg/cgroups/pids.go deleted file mode 100644 index 76e983ea..00000000 --- a/vendor/github.com/containers/common/pkg/cgroups/pids.go +++ /dev/null @@ -1,71 +0,0 @@ -//go:build !linux -// +build !linux - -package cgroups - -import ( - "fmt" - "os" - "path/filepath" - - spec "github.com/opencontainers/runtime-spec/specs-go" -) - -type pidHandler struct{} - -func getPidsHandler() *pidHandler { - return &pidHandler{} -} - -// Apply set the specified constraints -func (c *pidHandler) Apply(ctr *CgroupControl, res *spec.LinuxResources) error { - if res.Pids == nil { - return nil - } - var PIDRoot string - - if ctr.cgroup2 { - PIDRoot = filepath.Join(cgroupRoot, ctr.path) - } else { - PIDRoot = ctr.getCgroupv1Path(Pids) - } - - p := filepath.Join(PIDRoot, "pids.max") - return os.WriteFile(p, []byte(fmt.Sprintf("%d\n", res.Pids.Limit)), 0o644) -} - -// Create the cgroup -func (c *pidHandler) Create(ctr *CgroupControl) (bool, error) { - if ctr.cgroup2 { - return false, nil - } - return ctr.createCgroupDirectory(Pids) -} - -// Destroy the cgroup -func (c *pidHandler) Destroy(ctr *CgroupControl) error { - return rmDirRecursively(ctr.getCgroupv1Path(Pids)) -} - -// Stat fills a metrics structure with usage stats for the controller -func (c *pidHandler) Stat(ctr *CgroupControl, m *Metrics) error { - if ctr.path == "" { - // nothing we can do to retrieve the pids.current path - return nil - } - - var PIDRoot string - if ctr.cgroup2 { - PIDRoot = filepath.Join(cgroupRoot, ctr.path) - } else { - PIDRoot = ctr.getCgroupv1Path(Pids) - } - - current, err := readFileAsUint64(filepath.Join(PIDRoot, "pids.current")) - if err != nil { - return err - } - - m.Pids = PidsMetrics{Current: current} - return nil -} diff --git a/vendor/github.com/containers/common/pkg/cgroups/utils.go b/vendor/github.com/containers/common/pkg/cgroups/utils.go deleted file mode 100644 index a77ba4a2..00000000 --- a/vendor/github.com/containers/common/pkg/cgroups/utils.go +++ /dev/null @@ -1,179 +0,0 @@ -package cgroups - -import ( - "bytes" - "errors" - "fmt" - "os" - "path/filepath" - "strconv" - "strings" -) - -var TestMode bool - -func cleanString(s string) string { - return strings.Trim(s, "\n") -} - -func readAcct(ctr *CgroupControl, name string) (uint64, error) { - p := filepath.Join(ctr.getCgroupv1Path(CPUAcct), name) - return readFileAsUint64(p) -} - -func readAcctList(ctr *CgroupControl, name string) ([]uint64, error) { - p := filepath.Join(ctr.getCgroupv1Path(CPUAcct), name) - data, err := os.ReadFile(p) - if err != nil { - return nil, err - } - r := []uint64{} - for _, s := range strings.Split(string(data), " ") { - s = cleanString(s) - if s == "" { - break - } - v, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return nil, fmt.Errorf("parsing %s: %w", s, err) - } - r = append(r, v) - } - return r, nil -} - -// GetSystemCPUUsage returns the system usage for all the cgroups -func GetSystemCPUUsage() (uint64, error) { - cgroupv2, err := IsCgroup2UnifiedMode() - if err != nil { - return 0, err - } - if !cgroupv2 { - p := filepath.Join(cgroupRoot, 
CPUAcct, "cpuacct.usage") - return readFileAsUint64(p) - } - - files, err := os.ReadDir(cgroupRoot) - if err != nil { - return 0, err - } - var total uint64 - for _, file := range files { - if !file.IsDir() { - continue - } - p := filepath.Join(cgroupRoot, file.Name(), "cpu.stat") - - values, err := readCgroupMapPath(p) - if err != nil { - return 0, err - } - - if val, found := values["usage_usec"]; found { - v, err := strconv.ParseUint(cleanString(val[0]), 10, 64) - if err != nil { - return 0, err - } - total += v * 1000 - } - } - return total, nil -} - -func cpusetCopyFileFromParent(dir, file string, cgroupv2 bool) ([]byte, error) { - if dir == cgroupRoot { - return nil, fmt.Errorf("could not find parent to initialize cpuset %s", file) - } - path := filepath.Join(dir, file) - parentPath := path - if cgroupv2 { - parentPath = fmt.Sprintf("%s.effective", parentPath) - } - data, err := os.ReadFile(parentPath) - if err != nil { - // if the file doesn't exist, it is likely that the cpuset controller - // is not enabled in the kernel. - if os.IsNotExist(err) { - return nil, nil - } - return nil, err - } - if strings.Trim(string(data), "\n") != "" { - return data, nil - } - data, err = cpusetCopyFileFromParent(filepath.Dir(dir), file, cgroupv2) - if err != nil { - return nil, err - } - if err := os.WriteFile(path, data, 0o644); err != nil { - return nil, fmt.Errorf("write %s: %w", path, err) - } - return data, nil -} - -func cpusetCopyFromParent(path string, cgroupv2 bool) error { - for _, file := range []string{"cpuset.cpus", "cpuset.mems"} { - if _, err := cpusetCopyFileFromParent(path, file, cgroupv2); err != nil { - return err - } - } - return nil -} - -// createCgroupv2Path creates the cgroupv2 path and enables all the available controllers -func createCgroupv2Path(path string) (deferredError error) { - if !strings.HasPrefix(path, cgroupRoot+"/") { - return fmt.Errorf("invalid cgroup path %s", path) - } - content, err := os.ReadFile(cgroupRoot + "/cgroup.controllers") - if err != nil { - return err - } - ctrs := bytes.Fields(content) - res := append([]byte("+"), bytes.Join(ctrs, []byte(" +"))...) - - current := "/sys/fs" - elements := strings.Split(path, "/") - for i, e := range elements[3:] { - current = filepath.Join(current, e) - if i > 0 { - if err := os.Mkdir(current, 0o755); err != nil { - if !os.IsExist(err) { - return err - } - } else { - // If the directory was created, be sure it is not left around on errors. - defer func() { - if deferredError != nil { - os.Remove(current) - } - }() - } - } - // We enable the controllers for all the path components except the last one. It is not allowed to add - // PIDs if there are already enabled controllers. 
- if i < len(elements[3:])-1 { - if err := os.WriteFile(filepath.Join(current, "cgroup.subtree_control"), res, 0o755); err != nil { - return err - } - } - } - return nil -} - -func (c *CgroupControl) createCgroupDirectory(controller string) (bool, error) { - cPath := c.getCgroupv1Path(controller) - _, err := os.Stat(cPath) - if err == nil { - return false, nil - } - - if !errors.Is(err, os.ErrNotExist) { - return false, err - } - - if err := os.MkdirAll(cPath, 0o755); err != nil { - return false, fmt.Errorf("creating cgroup for %s: %w", controller, err) - } - return true, nil -} diff --git a/vendor/github.com/containers/common/pkg/cgroups/utils_linux.go b/vendor/github.com/containers/common/pkg/cgroups/utils_linux.go index 02723636..ed9f0761 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/utils_linux.go +++ b/vendor/github.com/containers/common/pkg/cgroups/utils_linux.go @@ -104,8 +104,8 @@ func ReadFile(dir, file string) (string, error) { return buf.String(), err } -// GetBlkioFiles gets the proper files for blkio weights -func GetBlkioFiles(cgroupPath string) (wtFile, wtDevFile string) { +// BlkioFiles gets the proper files for blkio weights +func BlkioFiles(cgroupPath string) (wtFile, wtDevFile string) { var weightFile string var weightDeviceFile string // in this important since runc keeps these variables private, they won't be set diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go index 8bdccd4e..75b917f0 100644 --- a/vendor/github.com/containers/common/pkg/config/config.go +++ b/vendor/github.com/containers/common/pkg/config/config.go @@ -10,6 +10,7 @@ import ( "strings" "github.com/BurntSushi/toml" + "github.com/containers/common/internal/attributedstring" "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/capabilities" "github.com/containers/common/pkg/util" @@ -30,24 +31,6 @@ const ( bindirPrefix = "$BINDIR" ) -// RuntimeStateStore is a constant indicating which state store implementation -// should be used by engine -type RuntimeStateStore int - -const ( - // InvalidStateStore is an invalid state store - InvalidStateStore RuntimeStateStore = iota - // InMemoryStateStore is an in-memory state that will not persist data - // on containers and pods between engine instances or after system - // reboot - InMemoryStateStore RuntimeStateStore = iota - // SQLiteStateStore is a state backed by a SQLite database - // It is presently disabled - SQLiteStateStore RuntimeStateStore = iota - // BoltDBStateStore is a state backed by a BoltDB database - BoltDBStateStore RuntimeStateStore = iota -) - var validImageVolumeModes = []string{_typeBind, "tmpfs", "ignore"} // ProxyEnv is a list of Proxy Environment variables @@ -86,17 +69,17 @@ type Config struct { // containers global options for containers tools type ContainersConfig struct { // Devices to add to all containers - Devices []string `toml:"devices,omitempty"` + Devices attributedstring.Slice `toml:"devices,omitempty"` // Volumes to add to all containers - Volumes []string `toml:"volumes,omitempty"` + Volumes attributedstring.Slice `toml:"volumes,omitempty"` // ApparmorProfile is the apparmor profile name which is used as the // default for the runtime. 
ApparmorProfile string `toml:"apparmor_profile,omitempty"` // Annotation to add to all containers - Annotations []string `toml:"annotations,omitempty"` + Annotations attributedstring.Slice `toml:"annotations,omitempty"` // BaseHostsFile is the path to a hosts file, the entries from this file // are added to the containers hosts file. As special value "image" is @@ -113,28 +96,28 @@ type ContainersConfig struct { // CgroupConf entries specifies a list of cgroup files to write to and their values. For example // "memory.high=1073741824" sets the memory.high limit to 1GB. - CgroupConf []string `toml:"cgroup_conf,omitempty"` + CgroupConf attributedstring.Slice `toml:"cgroup_conf,omitempty"` // Capabilities to add to all containers. - DefaultCapabilities []string `toml:"default_capabilities,omitempty"` + DefaultCapabilities attributedstring.Slice `toml:"default_capabilities,omitempty"` // Sysctls to add to all containers. - DefaultSysctls []string `toml:"default_sysctls,omitempty"` + DefaultSysctls attributedstring.Slice `toml:"default_sysctls,omitempty"` // DefaultUlimits specifies the default ulimits to apply to containers - DefaultUlimits []string `toml:"default_ulimits,omitempty"` + DefaultUlimits attributedstring.Slice `toml:"default_ulimits,omitempty"` // DefaultMountsFile is the path to the default mounts file for testing DefaultMountsFile string `toml:"-"` // DNSServers set default DNS servers. - DNSServers []string `toml:"dns_servers,omitempty"` + DNSServers attributedstring.Slice `toml:"dns_servers,omitempty"` // DNSOptions set default DNS options. - DNSOptions []string `toml:"dns_options,omitempty"` + DNSOptions attributedstring.Slice `toml:"dns_options,omitempty"` // DNSSearches set default DNS search domains. - DNSSearches []string `toml:"dns_searches,omitempty"` + DNSSearches attributedstring.Slice `toml:"dns_searches,omitempty"` // EnableKeyring tells the container engines whether to create // a kernel keyring for use within the container @@ -151,7 +134,7 @@ type ContainersConfig struct { EnableLabeledUsers bool `toml:"label_users,omitempty"` // Env is the environment variable list for container process. - Env []string `toml:"env,omitempty"` + Env attributedstring.Slice `toml:"env,omitempty"` // EnvHost Pass all host environment variables into the container. EnvHost bool `toml:"env_host,omitempty"` @@ -167,6 +150,8 @@ type ContainersConfig struct { Init bool `toml:"init,omitempty"` // InitPath is the path for init to run if the Init bool is enabled + // + // Deprecated: Do not use this field directly use conf.FindInitBinary() instead. InitPath string `toml:"init_path,omitempty"` // IPCNS way to create a ipc namespace for the container @@ -187,7 +172,7 @@ type ContainersConfig struct { LogTag string `toml:"log_tag,omitempty"` // Mount to add to all containers - Mounts []string `toml:"mounts,omitempty"` + Mounts attributedstring.Slice `toml:"mounts,omitempty"` // NetNS indicates how to create a network namespace for the container NetNS string `toml:"netns,omitempty"` @@ -213,6 +198,18 @@ type ContainersConfig struct { // performance implications. PrepareVolumeOnCreate bool `toml:"prepare_volume_on_create,omitempty"` + // Give extended privileges to all containers. A privileged container + // turns off the security features that isolate the container from the + // host. Dropped Capabilities, limited devices, read-only mount points, + // Apparmor/SELinux separation, and Seccomp filters are all disabled. 
+ // Due to the disabled security features the privileged field should + // almost never be set as containers can easily break out of + // confinment. + // + // Containers running in a user namespace (e.g., rootless containers) + // cannot have more privileges than the user that launched them. + Privileged bool `toml:"privileged,omitempty"` + // ReadOnly causes engine to run all containers with root file system mounted read-only ReadOnly bool `toml:"read_only,omitempty"` @@ -254,15 +251,15 @@ type EngineConfig struct { // ConmonEnvVars are environment variables to pass to the Conmon binary // when it is launched. - ConmonEnvVars []string `toml:"conmon_env_vars,omitempty"` + ConmonEnvVars attributedstring.Slice `toml:"conmon_env_vars,omitempty"` // ConmonPath is the path to the Conmon binary used for managing containers. // The first path pointing to a valid file will be used. - ConmonPath []string `toml:"conmon_path,omitempty"` + ConmonPath attributedstring.Slice `toml:"conmon_path,omitempty"` // ConmonRsPath is the path to the Conmon-rs binary used for managing containers. // The first path pointing to a valid file will be used. - ConmonRsPath []string `toml:"conmonrs_path,omitempty"` + ConmonRsPath attributedstring.Slice `toml:"conmonrs_path,omitempty"` // CompatAPIEnforceDockerHub enforces using docker.io for completing // short names in Podman's compatibility REST API. Note that this will @@ -274,7 +271,7 @@ type EngineConfig struct { // compose command. The first found provider is used for execution. // Can be an absolute and relative path or a (file) name. Make sure to // expand the return items via `os.ExpandEnv`. - ComposeProviders []string `toml:"compose_providers,omitempty"` + ComposeProviders attributedstring.Slice `toml:"compose_providers,omitempty"` // ComposeWarningLogs emits logs on each invocation of the compose // command indicating that an external compose provider is being @@ -297,7 +294,7 @@ type EngineConfig struct { EnablePortReservation bool `toml:"enable_port_reservation,omitempty"` // Environment variables to be used when running the container engine (e.g., Podman, Buildah). For example "http_proxy=internal.proxy.company.com" - Env []string `toml:"env,omitempty"` + Env attributedstring.Slice `toml:"env,omitempty"` // EventsLogFilePath is where the events log is stored. EventsLogFilePath string `toml:"events_logfile_path,omitempty"` @@ -319,12 +316,12 @@ type EngineConfig struct { // HelperBinariesDir is a list of directories which are used to search for // helper binaries. - HelperBinariesDir []string `toml:"helper_binaries_dir"` + HelperBinariesDir attributedstring.Slice `toml:"helper_binaries_dir,omitempty"` // configuration files. When the same filename is present in // multiple directories, the file in the directory listed last in // this slice takes precedence. - HooksDir []string `toml:"hooks_dir,omitempty"` + HooksDir attributedstring.Slice `toml:"hooks_dir,omitempty"` // ImageBuildFormat (DEPRECATED) indicates the default image format to // building container images. Should use ImageDefaultFormat @@ -357,6 +354,8 @@ type EngineConfig struct { InfraImage string `toml:"infra_image,omitempty"` // InitPath is the path to the container-init binary. + // + // Deprecated: Do not use this field directly use conf.FindInitBinary() instead. 
InitPath string `toml:"init_path,omitempty"` // KubeGenerateType sets the Kubernetes kind/specification to generate by default @@ -389,7 +388,7 @@ type EngineConfig struct { // NetworkCmdOptions is the default options to pass to the slirp4netns binary. // For example "allow_host_loopback=true" - NetworkCmdOptions []string `toml:"network_cmd_options,omitempty"` + NetworkCmdOptions attributedstring.Slice `toml:"network_cmd_options,omitempty"` // NoPivotRoot sets whether to set no-pivot-root in the OCI runtime. NoPivotRoot bool `toml:"no_pivot_root,omitempty"` @@ -429,7 +428,7 @@ type EngineConfig struct { ActiveService string `toml:"active_service,omitempty"` // Add existing instances with requested compression algorithms to manifest list - AddCompression []string `toml:"add_compression,omitempty"` + AddCompression attributedstring.Slice `toml:"add_compression,omitempty"` // ServiceDestinations mapped by service Names ServiceDestinations map[string]Destination `toml:"service_destinations,omitempty"` @@ -441,19 +440,19 @@ type EngineConfig struct { // The first path pointing to a valid file will be used This is used only // when there are no OCIRuntime/OCIRuntimes defined. It is used only to be // backward compatible with older versions of Podman. - RuntimePath []string `toml:"runtime_path,omitempty"` + RuntimePath attributedstring.Slice `toml:"runtime_path,omitempty"` // RuntimeSupportsJSON is the list of the OCI runtimes that support // --format=json. - RuntimeSupportsJSON []string `toml:"runtime_supports_json,omitempty"` + RuntimeSupportsJSON attributedstring.Slice `toml:"runtime_supports_json,omitempty"` // RuntimeSupportsNoCgroups is a list of OCI runtimes that support // running containers without CGroups. - RuntimeSupportsNoCgroups []string `toml:"runtime_supports_nocgroup,omitempty"` + RuntimeSupportsNoCgroups attributedstring.Slice `toml:"runtime_supports_nocgroup,omitempty"` // RuntimeSupportsKVM is a list of OCI runtimes that support // KVM separation for containers. - RuntimeSupportsKVM []string `toml:"runtime_supports_kvm,omitempty"` + RuntimeSupportsKVM attributedstring.Slice `toml:"runtime_supports_kvm,omitempty"` // SetOptions contains a subset of config options. It's used to indicate if // a given option has either been set by the user or by the parsed @@ -471,13 +470,6 @@ type EngineConfig struct { // readiness using the SD_NOTIFY mechanism. SDNotify bool `toml:"-"` - // StateType is the type of the backing state store. Avoid using multiple - // values for this with the same containers/storage configuration on the - // same system. Different state types do not interact, and each will see a - // separate set of containers, which may cause conflicts in - // containers/storage. As such this is not exposed via the config file. - StateType RuntimeStateStore `toml:"-"` - // ServiceTimeout is the number of seconds to wait without a connection // before the `podman system service` times out and exits ServiceTimeout uint `toml:"service_timeout,omitempty,omitzero"` @@ -561,24 +553,6 @@ type SetOptions struct { // backwards compatibility with older versions of libpod for which we must // query the database configuration. Not included in the on-disk config. StorageConfigGraphDriverNameSet bool `toml:"-"` - - // StaticDirSet indicates if the StaticDir has been explicitly set by the - // config or by the user. It's required to guarantee backwards compatibility - // with older versions of libpod for which we must query the database - // configuration. Not included in the on-disk config. 
- StaticDirSet bool `toml:"-"` - - // VolumePathSet indicates if the VolumePath has been explicitly set by the - // config or by the user. It's required to guarantee backwards compatibility - // with older versions of libpod for which we must query the database - // configuration. Not included in the on-disk config. - VolumePathSet bool `toml:"-"` - - // TmpDirSet indicates if the TmpDir has been explicitly set by the config - // or by the user. It's required to guarantee backwards compatibility with - // older versions of libpod for which we must query the database - // configuration. Not included in the on-disk config. - TmpDirSet bool `toml:"-"` } // NetworkConfig represents the "network" TOML config table @@ -588,10 +562,10 @@ type NetworkConfig struct { NetworkBackend string `toml:"network_backend,omitempty"` // CNIPluginDirs is where CNI plugin binaries are stored. - CNIPluginDirs []string `toml:"cni_plugin_dirs,omitempty"` + CNIPluginDirs attributedstring.Slice `toml:"cni_plugin_dirs,omitempty"` // NetavarkPluginDirs is a list of directories which contain netavark plugins. - NetavarkPluginDirs []string `toml:"netavark_plugin_dirs,omitempty"` + NetavarkPluginDirs attributedstring.Slice `toml:"netavark_plugin_dirs,omitempty"` // DefaultNetwork is the network name of the default network // to attach pods to. @@ -624,7 +598,7 @@ type NetworkConfig struct { // PastaOptions contains a default list of pasta(1) options that should // be used when running pasta. - PastaOptions []string `toml:"pasta_options,omitempty"` + PastaOptions attributedstring.Slice `toml:"pasta_options,omitempty"` } type SubnetPool struct { @@ -675,12 +649,12 @@ type MachineConfig struct { // User to use for rootless podman when init-ing a podman machine VM User string `toml:"user,omitempty"` // Volumes are host directories mounted into the VM by default. - Volumes []string `toml:"volumes"` + Volumes attributedstring.Slice `toml:"volumes,omitempty"` // Provider is the virtualization provider used to run podman-machine VM Provider string `toml:"provider,omitempty"` } -// FarmConfig represents the "farm" TOML config tabls +// FarmConfig represents the "farm" TOML config tables type FarmConfig struct { // Default is the default farm to be used when farming out builds Default string `toml:"default,omitempty"` @@ -740,12 +714,15 @@ func (c *Config) CheckCgroupsAndAdjustConfig() { } func (c *Config) addCAPPrefix() { - for i, val := range c.Containers.DefaultCapabilities { + caps := c.Containers.DefaultCapabilities.Get() + newCaps := make([]string, 0, len(caps)) + for _, val := range caps { if !strings.HasPrefix(strings.ToLower(val), "cap_") { val = "CAP_" + strings.ToUpper(val) } - c.Containers.DefaultCapabilities[i] = val + newCaps = append(newCaps, val) } + c.Containers.DefaultCapabilities.Set(newCaps) } // Validate is the main entry point for library configuration validation. @@ -880,7 +857,7 @@ func (c *NetworkConfig) Validate() error { // to first (version) matching conmon binary. If non is found, we try // to do a path lookup of "conmon". func (c *Config) FindConmon() (string, error) { - return findConmonPath(c.Engine.ConmonPath, "conmon") + return findConmonPath(c.Engine.ConmonPath.Get(), "conmon") } func findConmonPath(paths []string, binaryName string) (string, error) { @@ -910,7 +887,7 @@ func findConmonPath(paths []string, binaryName string) (string, error) { // to first (version) matching conmonrs binary. If non is found, we try // to do a path lookup of "conmonrs". 
func (c *Config) FindConmonRs() (string, error) { - return findConmonPath(c.Engine.ConmonRsPath, "conmonrs") + return findConmonPath(c.Engine.ConmonRsPath.Get(), "conmonrs") } // GetDefaultEnv returns the environment variables for the container. @@ -934,7 +911,7 @@ func (c *Config) GetDefaultEnvEx(envHost, httpProxy bool) []string { } } } - return append(env, c.Containers.Env...) + return append(env, c.Containers.Env.Get()...) } // Capabilities returns the capabilities parses the Add and Drop capability @@ -947,7 +924,7 @@ func (c *Config) Capabilities(user string, addCapabilities, dropCapabilities []s return true } - defaultCapabilities := c.Containers.DefaultCapabilities + defaultCapabilities := c.Containers.DefaultCapabilities.Get() if userNotRoot(user) { defaultCapabilities = []string{} } @@ -1100,34 +1077,6 @@ func Reload() (*Config, error) { return New(&Options{SetDefault: true}) } -func (c *Config) ActiveDestination() (uri, identity string, machine bool, err error) { - if uri, found := os.LookupEnv("CONTAINER_HOST"); found { - if v, found := os.LookupEnv("CONTAINER_SSHKEY"); found { - identity = v - } - return uri, identity, false, nil - } - connEnv := os.Getenv("CONTAINER_CONNECTION") - switch { - case connEnv != "": - d, found := c.Engine.ServiceDestinations[connEnv] - if !found { - return "", "", false, fmt.Errorf("environment variable CONTAINER_CONNECTION=%q service destination not found", connEnv) - } - return d.URI, d.Identity, d.IsMachine, nil - - case c.Engine.ActiveService != "": - d, found := c.Engine.ServiceDestinations[c.Engine.ActiveService] - if !found { - return "", "", false, fmt.Errorf("%q service destination not found", c.Engine.ActiveService) - } - return d.URI, d.Identity, d.IsMachine, nil - case c.Engine.RemoteURI != "": - return c.Engine.RemoteURI, c.Engine.RemoteIdentity, false, nil - } - return "", "", false, errors.New("no service destination configured") -} - var ( bindirFailed = false bindirCached = "" @@ -1156,7 +1105,7 @@ func findBindir() string { // FindHelperBinary will search the given binary name in the configured directories. // If searchPATH is set to true it will also search in $PATH. func (c *Config) FindHelperBinary(name string, searchPATH bool) (string, error) { - dirList := c.Engine.HelperBinariesDir + dirList := c.Engine.HelperBinariesDir.Get() bindirPath := "" bindirSearched := false @@ -1197,7 +1146,7 @@ func (c *Config) FindHelperBinary(name string, searchPATH bool) (string, error) return exec.LookPath(name) } configHint := "To resolve this error, set the helper_binaries_dir key in the `[engine]` section of containers.conf to the directory containing your helper binaries." - if len(c.Engine.HelperBinariesDir) == 0 { + if len(c.Engine.HelperBinariesDir.Get()) == 0 { return "", fmt.Errorf("could not find %q because there are no helper binary directories configured. %s", name, configHint) } return "", fmt.Errorf("could not find %q in one of %v. 
%s", name, c.Engine.HelperBinariesDir, configHint) @@ -1224,7 +1173,7 @@ func (c *Config) ImageCopyTmpDir() (string, error) { // setupEnv sets the environment variables for the engine func (c *Config) setupEnv() error { - for _, env := range c.Engine.Env { + for _, env := range c.Engine.Env.Get() { splitEnv := strings.SplitN(env, "=", 2) if len(splitEnv) != 2 { logrus.Warnf("invalid environment variable for engine %s, valid configuration is KEY=value pair", env) @@ -1282,3 +1231,20 @@ func ValidateImageVolumeMode(mode string) error { return fmt.Errorf("invalid image volume mode %q required value: %s", mode, strings.Join(validImageVolumeModes, ", ")) } + +// FindInitBinary will return the path to the init binary (catatonit) +func (c *Config) FindInitBinary() (string, error) { + // Sigh, for some reason we ended up with two InitPath field in containers.conf and + // both are used in podman so we have to keep supporting both to prevent regressions. + if c.Containers.InitPath != "" { + return c.Containers.InitPath, nil + } + if c.Engine.InitPath != "" { + return c.Engine.InitPath, nil + } + // keep old default working to guarantee backwards comapt + if _, err := os.Stat(DefaultInitPath); err == nil { + return DefaultInitPath, nil + } + return c.FindHelperBinary(defaultInitName, true) +} diff --git a/vendor/github.com/containers/common/pkg/config/config_local.go b/vendor/github.com/containers/common/pkg/config/config_local.go index 10d40ddd..dae3ea0d 100644 --- a/vendor/github.com/containers/common/pkg/config/config_local.go +++ b/vendor/github.com/containers/common/pkg/config/config_local.go @@ -10,8 +10,8 @@ import ( "regexp" "strings" - "github.com/container-orchestrated-devices/container-device-interface/pkg/parser" units "github.com/docker/go-units" + "tags.cncf.io/container-device-interface/pkg/parser" ) func (c *EngineConfig) validatePaths() error { @@ -31,7 +31,7 @@ func (c *EngineConfig) validatePaths() error { } func (c *ContainersConfig) validateDevices() error { - for _, d := range c.Devices { + for _, d := range c.Devices.Get() { if parser.IsQualifiedName(d) { continue } @@ -44,7 +44,7 @@ func (c *ContainersConfig) validateDevices() error { } func (c *ContainersConfig) validateUlimits() error { - for _, u := range c.DefaultUlimits { + for _, u := range c.DefaultUlimits.Get() { ul, err := units.ParseUlimit(u) if err != nil { return fmt.Errorf("unrecognized ulimit %s: %w", u, err) diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf b/vendor/github.com/containers/common/pkg/config/containers.conf index 54f20d71..8c532f07 100644 --- a/vendor/github.com/containers/common/pkg/config/containers.conf +++ b/vendor/github.com/containers/common/pkg/config/containers.conf @@ -149,6 +149,9 @@ default_sysctls = [ #init = false # Container init binary, if init=true, this is the init binary to be used for containers. +# If this option is not set catatonit is searched in the directories listed under +# the helper_binaries_dir option. It is recommended to just install catatonit +# there instead of configuring this option here. # #init_path = "/usr/libexec/podman/catatonit" @@ -237,6 +240,18 @@ default_sysctls = [ # #prepare_volume_on_create = false +# Give extended privileges to all containers. A privileged container turns off +# the security features that isolate the container from the host. Dropped +# Capabilities, limited devices, read-only mount points, Apparmor/SELinux +# separation, and Seccomp filters are all disabled. 
Due to the disabled +# security features the privileged field should almost never be set as +# containers can easily break out of confinment. +# +# Containers running in a user namespace (e.g., rootless containers) cannot +# have more privileges than the user that launched them. +# +#privileged = false + # Run all containers with root file system mounted read-only # # read_only = false @@ -442,10 +457,14 @@ default_sysctls = [ # short-name aliases defined in containers-registries.conf(5). #compat_api_enforce_docker_hub = true -# The database backend of Podman. Supported values are "boltdb" (default) and -# "sqlite". Please run `podman-system-reset` prior to changing the database +# The database backend of Podman. Supported values are "" (default), "boltdb" +# and "sqlite". An empty value means it will check whenever a boltdb already +# exists and use it when it does, otherwise it will use sqlite as default +# (e.g. new installs). This allows for backwards compatibility with older versions. +# Please run `podman-system-reset` prior to changing the database # backend of an existing deployment, to make sure Podman can operate correctly. -#database_backend="boltdb" +# +#database_backend = "" # Specify the keys sequence used to detach a container. # Format is a single character [a-Z] or a comma separated sequence of @@ -669,8 +688,8 @@ default_sysctls = [ # [engine.service_destinations.production] # URI to access the Podman service # Examples: -# rootless "unix://run/user/$UID/podman/podman.sock" (Default) -# rootful "unix://run/podman/podman.sock (Default) +# rootless "unix:///run/user/$UID/podman/podman.sock" (Default) +# rootful "unix:///run/podman/podman.sock (Default) # remote rootless ssh://engineering.lab.company.com/run/user/1000/podman/podman.sock # remote rootful ssh://root@10.10.1.136:22/run/podman/podman.sock # diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd b/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd index 13b7918d..f471e307 100644 --- a/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd +++ b/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd @@ -29,10 +29,14 @@ # #base_hosts_file = "" -# The database backend of Podman. Supported values are "boltdb" (default) and -# "sqlite". Please run `podman-system-reset` prior to changing the database +# The database backend of Podman. Supported values are "" (default), "boltdb" +# and "sqlite". An empty value means it will check whenever a boltdb already +# exists and use it when it does, otherwise it will use sqlite as default +# (e.g. new installs). This allows for backwards compatibility with older versions. +# Please run `podman-system-reset` prior to changing the database # backend of an existing deployment, to make sure Podman can operate correctly. -#database_backend="boltdb" +# +#database_backend = "" # List of default capabilities for containers. If it is empty or commented out, # the default capabilities defined in the container engine will be added. @@ -129,6 +133,9 @@ default_sysctls = [ #init = false # Container init binary, if init=true, this is the init binary to be used for containers. +# If this option is not set catatonit is searched in the directories listed under +# the helper_binaries_dir option. It is recommended to just install catatonit +# there instead of configuring this option here. 
# #init_path = "/usr/local/libexec/podman/catatonit" @@ -200,6 +207,18 @@ default_sysctls = [ # #prepare_volume_on_create = false +# Give extended privileges to all containers. A privileged container turns off +# the security features that isolate the container from the host. Dropped +# Capabilities, limited devices, read-only mount points, Apparmor/SELinux +# separation, and Seccomp filters are all disabled. Due to the disabled +# security features the privileged field should almost never be set as +# containers can easily break out of confinment. +# +# Containers running in a user namespace (e.g., rootless containers) cannot +# have more privileges than the user that launched them. +# +#privileged = false + # Set timezone in container. Takes IANA timezones as well as "local", # which sets the timezone in the container to match the host machine. # @@ -541,8 +560,8 @@ default_sysctls = [ # [service_destinations.production] # URI to access the Podman service # Examples: -# rootless "unix://run/user/$UID/podman/podman.sock" (Default) -# rootful "unix://run/podman/podman.sock (Default) +# rootless "unix:///run/user/$UID/podman/podman.sock" (Default) +# rootful "unix:///run/podman/podman.sock (Default) # remote rootless ssh://engineering.lab.company.com/run/user/1000/podman/podman.sock # remote rootful ssh://root@10.10.1.136:22/run/podman/podman.sock # diff --git a/vendor/github.com/containers/common/pkg/config/db_backend.go b/vendor/github.com/containers/common/pkg/config/db_backend.go index 8fd78165..a2fda8e0 100644 --- a/vendor/github.com/containers/common/pkg/config/db_backend.go +++ b/vendor/github.com/containers/common/pkg/config/db_backend.go @@ -13,6 +13,12 @@ const ( // SQLite backend. DBBackendSQLite + // DBBackendDefault describes that no explicit backend has been set. + // It should default to sqlite unless there is already an existing boltdb, + // this allows for backwards compatibility on upgrades. The actual detection + // logic must live in podman as we only know there were to look for the file. + DBBackendDefault + stringBoltDB = "boltdb" stringSQLite = "sqlite" ) @@ -24,6 +30,8 @@ func (d DBBackend) String() string { return stringBoltDB case DBBackendSQLite: return stringSQLite + case DBBackendDefault: + return "" default: return fmt.Sprintf("unsupported database backend: %d", d) } @@ -32,7 +40,7 @@ func (d DBBackend) String() string { // Validate returns whether the DBBackend is supported. func (d DBBackend) Validate() error { switch d { - case DBBackendBoltDB, DBBackendSQLite: + case DBBackendBoltDB, DBBackendSQLite, DBBackendDefault: return nil default: return fmt.Errorf("unsupported database backend: %d", d) @@ -49,12 +57,9 @@ func ParseDBBackend(raw string) (DBBackend, error) { return DBBackendBoltDB, nil case stringSQLite: return DBBackendSQLite, nil + case "": + return DBBackendDefault, nil default: return DBBackendUnsupported, fmt.Errorf("unsupported database backend: %q", raw) } } - -// DBBackend returns the configured database backend. 
-func (c *Config) DBBackend() (DBBackend, error) { - return ParseDBBackend(c.Engine.DBBackend) -} diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go index 942c0406..b60c4345 100644 --- a/vendor/github.com/containers/common/pkg/config/default.go +++ b/vendor/github.com/containers/common/pkg/config/default.go @@ -6,8 +6,10 @@ import ( "net" "os" "path/filepath" + "runtime" "strings" + "github.com/containers/common/internal/attributedstring" nettypes "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/apparmor" "github.com/containers/common/pkg/cgroupv2" @@ -29,9 +31,36 @@ const ( // _defaultImageVolumeMode is a mode to handle built-in image volumes. _defaultImageVolumeMode = _typeBind + + // defaultInitName is the default name of the init binary + defaultInitName = "catatonit" ) var ( + DefaultMaskedPaths = []string{ + "/proc/acpi", + "/proc/kcore", + "/proc/keys", + "/proc/latency_stats", + "/proc/sched_debug", + "/proc/scsi", + "/proc/timer_list", + "/proc/timer_stats", + "/sys/dev/block", + "/sys/devices/virtual/powercap", + "/sys/firmware", + "/sys/fs/selinux", + } + + DefaultReadOnlyPaths = []string{ + "/proc/asound", + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger", + } + // DefaultInfraImage is the default image to run as infrastructure containers in pods. DefaultInfraImage = "" // DefaultRootlessSHMLockPath is the default path for rootless SHM locks. @@ -97,6 +126,8 @@ var ( "/usr/libexec/docker/cli-plugins/docker-compose", "podman-compose", } + + defaultContainerEnv = []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"} ) // nolint:unparam @@ -119,8 +150,6 @@ const ( CgroupfsCgroupsManager = "cgroupfs" // DefaultApparmorProfile specifies the default apparmor profile for the container. DefaultApparmorProfile = apparmor.Profile - // DefaultDBBackend specifies the default database backend to be used by Podman. - DefaultDBBackend = DBBackendBoltDB // DefaultHostsFile is the default path to the hosts file. DefaultHostsFile = "/etc/hosts" // SystemdCgroupsManager represents systemd native cgroup manager. 
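A minimal sketch of how a caller might resolve the new empty-string database_backend default introduced by DBBackendDefault in this bump. ParseDBBackend and the DBBackend constants are taken from the diff above; the BoltDB-detection step and the state-file path are assumptions (the real detection logic lives in Podman, not in containers/common).

package main

import (
	"fmt"
	"os"

	"github.com/containers/common/pkg/config"
)

// effectiveBackend resolves the backend actually used: an explicit "boltdb" or
// "sqlite" wins, while the new empty default keeps an existing BoltDB state for
// backwards compatibility and otherwise falls back to SQLite on fresh installs.
func effectiveBackend(raw, boltStatePath string) (config.DBBackend, error) {
	backend, err := config.ParseDBBackend(raw)
	if err != nil {
		return config.DBBackendUnsupported, err
	}
	if backend != config.DBBackendDefault {
		return backend, nil
	}
	// Hypothetical detection step: prefer an existing BoltDB state file.
	if _, err := os.Stat(boltStatePath); err == nil {
		return config.DBBackendBoltDB, nil
	}
	return config.DBBackendSQLite, nil
}

func main() {
	// "" models the new default value of database_backend in containers.conf;
	// the path below is illustrative only.
	b, err := effectiveBackend("", "/var/lib/containers/storage/libpod/bolt_state.db")
	fmt.Println(b, err)
}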
@@ -188,41 +217,39 @@ func defaultConfig() (*Config, error) { return &Config{ Containers: ContainersConfig{ - Annotations: []string{}, + Annotations: attributedstring.Slice{}, ApparmorProfile: DefaultApparmorProfile, BaseHostsFile: "", CgroupNS: cgroupNS, Cgroups: getDefaultCgroupsMode(), - DNSOptions: []string{}, - DNSSearches: []string{}, - DNSServers: []string{}, - DefaultCapabilities: DefaultCapabilities, - DefaultSysctls: []string{}, - DefaultUlimits: getDefaultProcessLimits(), - Devices: []string{}, + DNSOptions: attributedstring.Slice{}, + DNSSearches: attributedstring.Slice{}, + DNSServers: attributedstring.Slice{}, + DefaultCapabilities: attributedstring.NewSlice(DefaultCapabilities), + DefaultSysctls: attributedstring.Slice{}, + DefaultUlimits: attributedstring.NewSlice(getDefaultProcessLimits()), + Devices: attributedstring.Slice{}, EnableKeyring: true, EnableLabeling: selinuxEnabled(), - Env: []string{ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", - }, - EnvHost: false, - HTTPProxy: true, - IPCNS: "shareable", - Init: false, - InitPath: "", - LogDriver: defaultLogDriver(), - LogSizeMax: DefaultLogSizeMax, - Mounts: []string{}, - NetNS: "private", - NoHosts: false, - PidNS: "private", - PidsLimit: DefaultPidsLimit, - ShmSize: DefaultShmSize, - TZ: "", - UTSNS: "private", - Umask: "0022", - UserNSSize: DefaultUserNSSize, // Deprecated - Volumes: []string{}, + Env: attributedstring.NewSlice(defaultContainerEnv), + EnvHost: false, + HTTPProxy: true, + IPCNS: "shareable", + Init: false, + InitPath: "", + LogDriver: defaultLogDriver(), + LogSizeMax: DefaultLogSizeMax, + Mounts: attributedstring.Slice{}, + NetNS: "private", + NoHosts: false, + PidNS: "private", + PidsLimit: DefaultPidsLimit, + ShmSize: DefaultShmSize, + TZ: "", + UTSNS: "private", + Umask: "0022", + UserNSSize: DefaultUserNSSize, // Deprecated + Volumes: attributedstring.Slice{}, }, Network: NetworkConfig{ DefaultNetwork: "podman", @@ -230,8 +257,8 @@ func defaultConfig() (*Config, error) { DefaultSubnetPools: DefaultSubnetPools, DefaultRootlessNetworkCmd: "slirp4netns", DNSBindPort: 0, - CNIPluginDirs: DefaultCNIPluginDirs, - NetavarkPluginDirs: DefaultNetavarkPluginDirs, + CNIPluginDirs: attributedstring.NewSlice(DefaultCNIPluginDirs), + NetavarkPluginDirs: attributedstring.NewSlice(DefaultNetavarkPluginDirs), }, Engine: *defaultEngineConfig, Secrets: defaultSecretConfig(), @@ -250,13 +277,17 @@ func defaultSecretConfig() SecretConfig { // defaultMachineConfig returns the default machine configuration. func defaultMachineConfig() MachineConfig { + cpus := runtime.NumCPU() / 2 + if cpus == 0 { + cpus = 1 + } return MachineConfig{ - CPUs: 1, + CPUs: uint64(cpus), DiskSize: 100, Image: getDefaultMachineImage(), Memory: 2048, User: getDefaultMachineUser(), - Volumes: getDefaultMachineVolumes(), + Volumes: attributedstring.NewSlice(getDefaultMachineVolumes()), } } @@ -268,7 +299,7 @@ func defaultFarmConfig() FarmConfig { } } -// defaultEngineConfig eturns a default engine configuration. Note that the +// defaultEngineConfig returns a default engine configuration. Note that the // config is different for root and rootless. It also parses the storage.conf. 
func defaultEngineConfig() (*EngineConfig, error) { c := new(EngineConfig) @@ -281,7 +312,7 @@ func defaultEngineConfig() (*EngineConfig, error) { c.EventsLogFileMaxSize = eventsLogMaxSize(DefaultEventsLogSizeMax) c.CompatAPIEnforceDockerHub = true - c.ComposeProviders = getDefaultComposeProviders() // may vary across supported platforms + c.ComposeProviders.Set(getDefaultComposeProviders()) // may vary across supported platforms c.ComposeWarningLogs = true if path, ok := os.LookupEnv("CONTAINERS_STORAGE_CONF"); ok { @@ -301,20 +332,17 @@ func defaultEngineConfig() (*EngineConfig, error) { c.graphRoot = storeOpts.GraphRoot c.ImageCopyTmpDir = getDefaultTmpDir() - c.StaticDir = filepath.Join(storeOpts.GraphRoot, "libpod") - c.VolumePath = filepath.Join(storeOpts.GraphRoot, "volumes") c.VolumePluginTimeout = DefaultVolumePluginTimeout c.CompressionFormat = "gzip" - c.HelperBinariesDir = defaultHelperBinariesDir + c.HelperBinariesDir.Set(defaultHelperBinariesDir) if additionalHelperBinariesDir != "" { - c.HelperBinariesDir = append(c.HelperBinariesDir, additionalHelperBinariesDir) + c.HelperBinariesDir.Set(append(c.HelperBinariesDir.Get(), additionalHelperBinariesDir)) } - c.HooksDir = DefaultHooksDirs + c.HooksDir.Set(DefaultHooksDirs) c.ImageDefaultTransport = _defaultTransport c.ImageVolumeMode = _defaultImageVolumeMode - c.StateType = BoltDBStateStore c.ImageBuildFormat = "oci" @@ -397,10 +425,8 @@ func defaultEngineConfig() (*EngineConfig, error) { // Needs to be called after populating c.OCIRuntimes. c.OCIRuntime = c.findRuntime() - c.ConmonEnvVars = []string{ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", - } - c.ConmonPath = []string{ + c.ConmonEnvVars.Set([]string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}) + c.ConmonPath.Set([]string{ "/usr/libexec/podman/conmon", "/usr/local/libexec/podman/conmon", "/usr/local/lib/podman/conmon", @@ -409,8 +435,8 @@ func defaultEngineConfig() (*EngineConfig, error) { "/usr/local/bin/conmon", "/usr/local/sbin/conmon", "/run/current-system/sw/bin/conmon", - } - c.ConmonRsPath = []string{ + }) + c.ConmonRsPath.Set([]string{ "/usr/libexec/podman/conmonrs", "/usr/local/libexec/podman/conmonrs", "/usr/local/lib/podman/conmonrs", @@ -419,10 +445,9 @@ func defaultEngineConfig() (*EngineConfig, error) { "/usr/local/bin/conmonrs", "/usr/local/sbin/conmonrs", "/run/current-system/sw/bin/conmonrs", - } + }) c.PullPolicy = DefaultPullPolicy - c.DBBackend = stringBoltDB - c.RuntimeSupportsJSON = []string{ + c.RuntimeSupportsJSON.Set([]string{ "crun", "runc", "kata", @@ -430,10 +455,9 @@ func defaultEngineConfig() (*EngineConfig, error) { "youki", "krun", "ocijail", - } - c.RuntimeSupportsNoCgroups = []string{"crun", "krun"} - c.RuntimeSupportsKVM = []string{"kata", "kata-runtime", "kata-qemu", "kata-fc", "krun"} - c.InitPath = DefaultInitPath + }) + c.RuntimeSupportsNoCgroups.Set([]string{"crun", "krun"}) + c.RuntimeSupportsKVM.Set([]string{"kata", "kata-runtime", "kata-qemu", "kata-fc", "krun"}) c.NoPivotRoot = false c.InfraImage = DefaultInfraImage @@ -503,47 +527,42 @@ func (c *Config) SecurityOptions() []string { // Sysctls returns the default sysctls to set in containers. func (c *Config) Sysctls() []string { - return c.Containers.DefaultSysctls + return c.Containers.DefaultSysctls.Get() } // Volumes returns the default set of volumes that should be mounted in containers. 
func (c *Config) Volumes() []string { - return c.Containers.Volumes + return c.Containers.Volumes.Get() } // Mounts returns the default set of mounts that should be mounted in containers. func (c *Config) Mounts() []string { - return c.Containers.Mounts + return c.Containers.Mounts.Get() } // Devices returns the default additional devices for containers. func (c *Config) Devices() []string { - return c.Containers.Devices + return c.Containers.Devices.Get() } // DNSServers returns the default DNS servers to add to resolv.conf in containers. func (c *Config) DNSServers() []string { - return c.Containers.DNSServers + return c.Containers.DNSServers.Get() } // DNSSerches returns the default DNS searches to add to resolv.conf in containers. func (c *Config) DNSSearches() []string { - return c.Containers.DNSSearches + return c.Containers.DNSSearches.Get() } // DNSOptions returns the default DNS options to add to resolv.conf in containers. func (c *Config) DNSOptions() []string { - return c.Containers.DNSOptions + return c.Containers.DNSOptions.Get() } // Env returns the default additional environment variables to add to containers. func (c *Config) Env() []string { - return c.Containers.Env -} - -// InitPath returns location where init program added to containers when users specify the --init flag. -func (c *Config) InitPath() string { - return c.Containers.InitPath + return c.Containers.Env.Get() } // IPCNS returns the default IPC Namespace configuration to run containers with. @@ -578,7 +597,7 @@ func (c *Config) ShmSize() string { // Ulimits returns the default ulimits to use in containers. func (c *Config) Ulimits() []string { - return c.Containers.DefaultUlimits + return c.Containers.DefaultUlimits.Get() } // PidsLimit returns the default maximum number of pids to use in containers. @@ -623,7 +642,7 @@ func (c *Config) MachineEnabled() bool { // MachineVolumes returns volumes to mount into the VM. 
func (c *Config) MachineVolumes() ([]string, error) { - return machineVolumes(c.Machine.Volumes) + return machineVolumes(c.Machine.Volumes.Get()) } func machineVolumes(volumes []string) ([]string, error) { diff --git a/vendor/github.com/containers/common/pkg/filters/filters.go b/vendor/github.com/containers/common/pkg/filters/filters.go index 729061db..3d442a53 100644 --- a/vendor/github.com/containers/common/pkg/filters/filters.go +++ b/vendor/github.com/containers/common/pkg/filters/filters.go @@ -5,9 +5,11 @@ import ( "fmt" "net/http" "path/filepath" + "regexp" "strings" "time" + "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/timetype" ) @@ -105,13 +107,7 @@ func PrepareFilters(r *http.Request) (map[string][]string, error) { func MatchLabelFilters(filterValues []string, labels map[string]string) bool { outer: for _, filterValue := range filterValues { - filterArray := strings.SplitN(filterValue, "=", 2) - filterKey := filterArray[0] - if len(filterArray) > 1 { - filterValue = filterArray[1] - } else { - filterValue = "" - } + filterKey, filterValue := splitFilterValue(filterValue) for labelKey, labelValue := range labels { if filterValue == "" || labelValue == filterValue { if labelKey == filterKey || matchPattern(filterKey, labelKey) { @@ -124,6 +120,32 @@ outer: return true } +// MatchNegatedLabelFilters matches negated labels and returns true if they are valid +func MatchNegatedLabelFilters(filterValues []string, labels map[string]string) bool { + for _, filterValue := range filterValues { + filterKey, filterValue := splitFilterValue(filterValue) + for labelKey, labelValue := range labels { + if filterValue == "" || labelValue == filterValue { + if labelKey == filterKey || matchPattern(filterKey, labelKey) { + return false + } + } + } + } + return true +} + +func splitFilterValue(filterValue string) (string, string) { + filterArray := strings.SplitN(filterValue, "=", 2) + filterKey := filterArray[0] + if len(filterArray) > 1 { + filterValue = filterArray[1] + } else { + filterValue = "" + } + return filterKey, filterValue +} + func matchPattern(pattern string, value string) bool { if strings.Contains(pattern, "*") { filter := fmt.Sprintf("*%s*", pattern) @@ -134,3 +156,22 @@ func matchPattern(pattern string, value string) bool { } return false } + +// FilterID is a function used to compare an id against a set of ids, if the +// input is hex we check if the prefix matches. Otherwise we assume it is a +// regex and try to match that. +// see https://github.com/containers/podman/issues/18471 for why we do this +func FilterID(id string, filters []string) bool { + for _, want := range filters { + isRegex := types.NotHexRegex.MatchString(want) + if isRegex { + match, err := regexp.MatchString(want, id) + if err == nil && match { + return true + } + } else if strings.HasPrefix(id, strings.ToLower(want)) { + return true + } + } + return false +} diff --git a/vendor/github.com/containers/common/pkg/password/password_supported.go b/vendor/github.com/containers/common/pkg/password/password_supported.go new file mode 100644 index 00000000..56e95b3d --- /dev/null +++ b/vendor/github.com/containers/common/pkg/password/password_supported.go @@ -0,0 +1,57 @@ +//go:build linux || darwin || freebsd +// +build linux darwin freebsd + +package password + +import ( + "errors" + "os" + "os/signal" + "syscall" + + terminal "golang.org/x/term" +) + +var ErrInterrupt = errors.New("interrupted") + +// Read reads a password from the terminal without echo. 
+func Read(fd int) ([]byte, error) { + // Store and restore the terminal status on interruptions to + // avoid that the terminal remains in the password state + // This is necessary as for https://github.com/golang/go/issues/31180 + + oldState, err := terminal.GetState(fd) + if err != nil { + return make([]byte, 0), err + } + + type Buffer struct { + Buffer []byte + Error error + } + errorChannel := make(chan Buffer, 1) + + // SIGINT and SIGTERM restore the terminal, otherwise the no-echo mode would remain intact + interruptChannel := make(chan os.Signal, 1) + signal.Notify(interruptChannel, syscall.SIGINT, syscall.SIGTERM) + defer func() { + signal.Stop(interruptChannel) + close(interruptChannel) + }() + go func() { + for range interruptChannel { + if oldState != nil { + _ = terminal.Restore(fd, oldState) + } + errorChannel <- Buffer{Buffer: make([]byte, 0), Error: ErrInterrupt} + } + }() + + go func() { + buf, err := terminal.ReadPassword(fd) + errorChannel <- Buffer{Buffer: buf, Error: err} + }() + + buf := <-errorChannel + return buf.Buffer, buf.Error +} diff --git a/vendor/github.com/containers/common/pkg/password/password_windows.go b/vendor/github.com/containers/common/pkg/password/password_windows.go new file mode 100644 index 00000000..7a0822d0 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/password/password_windows.go @@ -0,0 +1,21 @@ +//go:build windows +// +build windows + +package password + +import ( + terminal "golang.org/x/term" +) + +// Read reads a password from the terminal. +func Read(fd int) ([]byte, error) { + oldState, err := terminal.GetState(fd) + if err != nil { + return make([]byte, 0), err + } + buf, err := terminal.ReadPassword(fd) + if oldState != nil { + _ = terminal.Restore(fd, oldState) + } + return buf, err +} diff --git a/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go b/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go index a2924737..6ba2154a 100644 --- a/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go +++ b/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go @@ -362,6 +362,35 @@ func addFIPSModeSubscription(mounts *[]rspec.Mount, containerRunDir, mountPoint, } *mounts = append(*mounts, m) } + + // Make sure we set the config to FIPS so that the container does not overwrite + // /etc/crypto-policies/back-ends when crypto-policies-scripts is reinstalled. 
+ cryptoPoliciesConfigFile := filepath.Join(containerRunDir, "fips-config") + file, err := os.Create(cryptoPoliciesConfigFile) + if err != nil { + return fmt.Errorf("creating fips config file in container for FIPS mode: %w", err) + } + defer file.Close() + if _, err := file.WriteString("FIPS\n"); err != nil { + return fmt.Errorf("writing fips config file in container for FIPS mode: %w", err) + } + if err = label.Relabel(cryptoPoliciesConfigFile, mountLabel, false); err != nil { + return fmt.Errorf("applying correct labels on fips-config file: %w", err) + } + if err := file.Chown(uid, gid); err != nil { + return fmt.Errorf("chown fips-config file: %w", err) + } + + policyConfig := "/etc/crypto-policies/config" + if !mountExists(*mounts, policyConfig) { + m := rspec.Mount{ + Source: cryptoPoliciesConfigFile, + Destination: policyConfig, + Type: "bind", + Options: []string{"bind", "rprivate"}, + } + *mounts = append(*mounts, m) + } return nil } diff --git a/vendor/github.com/containers/common/pkg/util/copy.go b/vendor/github.com/containers/common/pkg/util/copy.go deleted file mode 100644 index a45b82fc..00000000 --- a/vendor/github.com/containers/common/pkg/util/copy.go +++ /dev/null @@ -1,57 +0,0 @@ -package util - -import ( - "errors" - "io" -) - -// ErrDetach indicates that an attach session was manually detached by -// the user. -var ErrDetach = errors.New("detached from container") - -// CopyDetachable is similar to io.Copy but support a detach key sequence to break out. -func CopyDetachable(dst io.Writer, src io.Reader, keys []byte) (written int64, err error) { - buf := make([]byte, 32*1024) - for { - nr, er := src.Read(buf) - if nr > 0 { - preservBuf := []byte{} - for i, key := range keys { - preservBuf = append(preservBuf, buf[0:nr]...) - if nr != 1 || buf[0] != key { - break - } - if i == len(keys)-1 { - return 0, ErrDetach - } - nr, er = src.Read(buf) - } - var nw int - var ew error - if len(preservBuf) > 0 { - nw, ew = dst.Write(preservBuf) - nr = len(preservBuf) - } else { - nw, ew = dst.Write(buf[0:nr]) - } - if nw > 0 { - written += int64(nw) - } - if ew != nil { - err = ew - break - } - if nr != nw { - err = io.ErrShortWrite - break - } - } - if er != nil { - if er != io.EOF { - err = er - } - break - } - } - return written, err -} diff --git a/vendor/github.com/containers/common/pkg/util/util.go b/vendor/github.com/containers/common/pkg/util/util.go index 221b0119..708472ba 100644 --- a/vendor/github.com/containers/common/pkg/util/util.go +++ b/vendor/github.com/containers/common/pkg/util/util.go @@ -1,119 +1,16 @@ package util import ( - "bytes" - "errors" "fmt" "os" - "os/exec" "path/filepath" "regexp" - "strings" "time" - "github.com/containers/common/libnetwork/types" "github.com/fsnotify/fsnotify" "github.com/sirupsen/logrus" ) -const ( - UnknownPackage = "Unknown" -) - -var ErrInterrupt = errors.New("interrupted") - -// Note: This function is copied from containers/podman libpod/util.go -// Please see https://github.com/containers/common/pull/1460 -func queryPackageVersion(cmdArg ...string) string { - output := UnknownPackage - if 1 < len(cmdArg) { - cmd := exec.Command(cmdArg[0], cmdArg[1:]...) 
- if outp, err := cmd.Output(); err == nil { - output = string(outp) - deb := false - if cmdArg[0] == "/usr/bin/dlocate" { - // can return multiple matches - l := strings.Split(output, "\n") - output = l[0] - deb = true - } else if cmdArg[0] == "/usr/bin/dpkg" { - deb = true - } - if deb { - r := strings.Split(output, ": ") - queryFormat := `${Package}_${Version}_${Architecture}` - cmd = exec.Command("/usr/bin/dpkg-query", "-f", queryFormat, "-W", r[0]) - if outp, err := cmd.Output(); err == nil { - output = string(outp) - } - } - } - if cmdArg[0] == "/sbin/apk" { - prefix := cmdArg[len(cmdArg)-1] + " is owned by " - output = strings.Replace(output, prefix, "", 1) - } - } - return strings.Trim(output, "\n") -} - -// Note: This function is copied from containers/podman libpod/util.go -// Please see https://github.com/containers/common/pull/1460 -func PackageVersion(program string) string { // program is full path - _, err := os.Stat(program) - if err != nil { - return UnknownPackage - } - packagers := [][]string{ - {"/usr/bin/rpm", "-q", "-f"}, - {"/usr/bin/dlocate", "-F"}, // Debian, Ubuntu (quick) - {"/usr/bin/dpkg", "-S"}, // Debian, Ubuntu (slow) - {"/usr/bin/pacman", "-Qo"}, // Arch - {"/usr/bin/qfile", "-qv"}, // Gentoo (quick) - {"/usr/bin/equery", "b"}, // Gentoo (slow) - {"/sbin/apk", "info", "-W"}, // Alpine - {"/usr/local/sbin/pkg", "which", "-q"}, // FreeBSD - } - - for _, cmd := range packagers { - cmd = append(cmd, program) - if out := queryPackageVersion(cmd...); out != UnknownPackage { - return out - } - } - return UnknownPackage -} - -// Note: This function is copied from containers/podman libpod/util.go -// Please see https://github.com/containers/common/pull/1460 -func ProgramVersion(program string) (string, error) { - return programVersion(program, false) -} - -func ProgramVersionDnsname(program string) (string, error) { - return programVersion(program, true) -} - -func programVersion(program string, dnsname bool) (string, error) { - cmd := exec.Command(program, "--version") - var stdout bytes.Buffer - var stderr bytes.Buffer - cmd.Stdout = &stdout - cmd.Stderr = &stderr - - err := cmd.Run() - if err != nil { - return "", fmt.Errorf("`%v --version` failed: %v %v (%v)", program, stderr.String(), stdout.String(), err) - } - - output := strings.TrimSuffix(stdout.String(), "\n") - // dnsname --version returns the information to stderr - if dnsname { - output = strings.TrimSuffix(stderr.String(), "\n") - } - - return output, nil -} - // StringInSlice determines if a string is in a string slice, returns bool func StringInSlice(s string, sl []string) bool { for _, i := range sl { @@ -135,25 +32,6 @@ func StringMatchRegexSlice(s string, re []string) bool { return false } -// FilterID is a function used to compare an id against a set of ids, if the -// input is hex we check if the prefix matches. Otherwise we assume it is a -// regex and try to match that. 
-// see https://github.com/containers/podman/issues/18471 for why we do this -func FilterID(id string, filters []string) bool { - for _, want := range filters { - isRegex := types.NotHexRegex.MatchString(want) - if isRegex { - match, err := regexp.MatchString(want, id) - if err == nil && match { - return true - } - } else if strings.HasPrefix(id, strings.ToLower(want)) { - return true - } - } - return false -} - // WaitForFile waits until a file has been created or the given timeout has occurred func WaitForFile(path string, chWait chan error, timeout time.Duration) (bool, error) { var inotifyEvents chan fsnotify.Event diff --git a/vendor/github.com/containers/common/pkg/util/util_supported.go b/vendor/github.com/containers/common/pkg/util/util_supported.go index aa659c17..0cd53af5 100644 --- a/vendor/github.com/containers/common/pkg/util/util_supported.go +++ b/vendor/github.com/containers/common/pkg/util/util_supported.go @@ -7,7 +7,6 @@ import ( "errors" "fmt" "os" - "os/signal" "path/filepath" "sync" "syscall" @@ -15,7 +14,6 @@ import ( "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/unshare" "github.com/sirupsen/logrus" - terminal "golang.org/x/term" ) var ( @@ -91,45 +89,3 @@ func GetRuntimeDir() (string, error) { } return rootlessRuntimeDir, nil } - -// ReadPassword reads a password from the terminal without echo. -func ReadPassword(fd int) ([]byte, error) { - // Store and restore the terminal status on interruptions to - // avoid that the terminal remains in the password state - // This is necessary as for https://github.com/golang/go/issues/31180 - - oldState, err := terminal.GetState(fd) - if err != nil { - return make([]byte, 0), err - } - - type Buffer struct { - Buffer []byte - Error error - } - errorChannel := make(chan Buffer, 1) - - // SIGINT and SIGTERM restore the terminal, otherwise the no-echo mode would remain intact - interruptChannel := make(chan os.Signal, 1) - signal.Notify(interruptChannel, syscall.SIGINT, syscall.SIGTERM) - defer func() { - signal.Stop(interruptChannel) - close(interruptChannel) - }() - go func() { - for range interruptChannel { - if oldState != nil { - _ = terminal.Restore(fd, oldState) - } - errorChannel <- Buffer{Buffer: make([]byte, 0), Error: ErrInterrupt} - } - }() - - go func() { - buf, err := terminal.ReadPassword(fd) - errorChannel <- Buffer{Buffer: buf, Error: err} - }() - - buf := <-errorChannel - return buf.Buffer, buf.Error -} diff --git a/vendor/github.com/containers/common/pkg/util/util_windows.go b/vendor/github.com/containers/common/pkg/util/util_windows.go index cc431e8a..1525bdc3 100644 --- a/vendor/github.com/containers/common/pkg/util/util_windows.go +++ b/vendor/github.com/containers/common/pkg/util/util_windows.go @@ -5,24 +5,9 @@ package util import ( "errors" - - terminal "golang.org/x/term" ) // getRuntimeDir returns the runtime directory func GetRuntimeDir() (string, error) { return "", errors.New("this function is not implemented for windows") } - -// ReadPassword reads a password from the terminal. 
-func ReadPassword(fd int) ([]byte, error) { - oldState, err := terminal.GetState(fd) - if err != nil { - return make([]byte, 0), err - } - buf, err := terminal.ReadPassword(fd) - if oldState != nil { - _ = terminal.Restore(fd, oldState) - } - return buf, err -} diff --git a/vendor/github.com/containers/common/pkg/version/version.go b/vendor/github.com/containers/common/pkg/version/version.go new file mode 100644 index 00000000..0d830060 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/version/version.go @@ -0,0 +1,105 @@ +package version + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "strings" +) + +const ( + UnknownPackage = "Unknown" +) + +// Note: This function is copied from containers/podman libpod/util.go +// Please see https://github.com/containers/common/pull/1460 +func queryPackageVersion(cmdArg ...string) string { + output := UnknownPackage + if 1 < len(cmdArg) { + cmd := exec.Command(cmdArg[0], cmdArg[1:]...) + if outp, err := cmd.Output(); err == nil { + output = string(outp) + deb := false + if cmdArg[0] == "/usr/bin/dlocate" { + // can return multiple matches + l := strings.Split(output, "\n") + output = l[0] + deb = true + } else if cmdArg[0] == "/usr/bin/dpkg" { + deb = true + } + if deb { + r := strings.Split(output, ": ") + queryFormat := `${Package}_${Version}_${Architecture}` + cmd = exec.Command("/usr/bin/dpkg-query", "-f", queryFormat, "-W", r[0]) + if outp, err := cmd.Output(); err == nil { + output = string(outp) + } + } + } + if cmdArg[0] == "/sbin/apk" { + prefix := cmdArg[len(cmdArg)-1] + " is owned by " + output = strings.Replace(output, prefix, "", 1) + } + } + return strings.Trim(output, "\n") +} + +// Note: This function is copied from containers/podman libpod/util.go +// Please see https://github.com/containers/common/pull/1460 +func Package(program string) string { // program is full path + _, err := os.Stat(program) + if err != nil { + return UnknownPackage + } + packagers := [][]string{ + {"/usr/bin/rpm", "-q", "-f"}, + {"/usr/bin/dlocate", "-F"}, // Debian, Ubuntu (quick) + {"/usr/bin/dpkg", "-S"}, // Debian, Ubuntu (slow) + {"/usr/bin/pacman", "-Qo"}, // Arch + {"/usr/bin/qfile", "-qv"}, // Gentoo (quick) + {"/usr/bin/equery", "b"}, // Gentoo (slow) + {"/sbin/apk", "info", "-W"}, // Alpine + {"/usr/local/sbin/pkg", "which", "-q"}, // FreeBSD + } + + for _, cmd := range packagers { + cmd = append(cmd, program) + if out := queryPackageVersion(cmd...); out != UnknownPackage { + return out + } + } + return UnknownPackage +} + +// Note: This function is copied from containers/podman libpod/util.go +// Please see https://github.com/containers/common/pull/1460 +func Program(name string) (string, error) { + return program(name, false) +} + +func ProgramDnsname(name string) (string, error) { + return program(name, true) +} + +func program(program string, dnsname bool) (string, error) { + cmd := exec.Command(program, "--version") + var stdout bytes.Buffer + var stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + err := cmd.Run() + if err != nil { + return "", fmt.Errorf("`%v --version` failed: %v %v (%v)", program, stderr.String(), stdout.String(), err) + } + + output := strings.TrimSuffix(stdout.String(), "\n") + // dnsname --version returns the information to stderr + if dnsname { + output = strings.TrimSuffix(stderr.String(), "\n") + } + + return output, nil +} diff --git a/vendor/github.com/containers/common/version/version.go b/vendor/github.com/containers/common/version/version.go index a0a57b54..639a2d72 100644 --- 
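
The package/version probing helpers likewise move out of pkg/util into the new pkg/version package, with shorter names (Package, Program, ProgramDnsname). A hedged usage sketch; /usr/bin/crun is only an example path, any installed binary works:

```go
package main

import (
	"fmt"

	"github.com/containers/common/pkg/version"
)

func main() {
	// Package asks the installed package manager (rpm, dpkg, pacman, apk, ...)
	// which package owns the binary; it returns "Unknown" when nothing does.
	fmt.Println(version.Package("/usr/bin/crun"))

	// Program runs `<path> --version` and returns the trimmed stdout.
	out, err := version.Program("/usr/bin/crun")
	if err != nil {
		fmt.Println("version probe failed:", err)
		return
	}
	fmt.Println(out)
}
```
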
a/vendor/github.com/containers/common/version/version.go +++ b/vendor/github.com/containers/common/version/version.go @@ -1,4 +1,4 @@ package version // Version is the version of the build. -const Version = "0.56.0" +const Version = "0.57.0" diff --git a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml new file mode 100644 index 00000000..ffc7b992 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml @@ -0,0 +1,13 @@ +freebsd_task: + name: 'FreeBSD' + freebsd_instance: + image_family: freebsd-13-2 + install_script: + - pkg update -f + - pkg install -y go + test_script: + # run tests as user "cirrus" instead of root + - pw useradd cirrus -m + - chown -R cirrus:cirrus . + - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore index 1d89d85c..391cc076 100644 --- a/vendor/github.com/fsnotify/fsnotify/.gitignore +++ b/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -4,3 +4,4 @@ # Output of go build ./cmd/fsnotify /fsnotify +/fsnotify.exe diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index 77f9593b..e0e57575 100644 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -1,16 +1,87 @@ # Changelog -All notable changes to this project will be documented in this file. +Unreleased +---------- +Nothing yet. -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +1.7.0 - 2023-10-22 +------------------ +This version of fsnotify needs Go 1.17. -## [Unreleased] +### Additions -Nothing yet. +- illumos: add FEN backend to support illumos and Solaris. ([#371]) + +- all: add `NewBufferedWatcher()` to use a buffered channel, which can be useful + in cases where you can't control the kernel buffer and receive a large number + of events in bursts. ([#550], [#572]) + +- all: add `AddWith()`, which is identical to `Add()` but allows passing + options. ([#521]) + +- windows: allow setting the ReadDirectoryChangesW() buffer size with + `fsnotify.WithBufferSize()`; the default of 64K is the highest value that + works on all platforms and is enough for most purposes, but in some cases a + highest buffer is needed. ([#521]) + +### Changes and fixes + +- inotify: remove watcher if a watched path is renamed ([#518]) + + After a rename the reported name wasn't updated, or even an empty string. + Inotify doesn't provide any good facilities to update it, so just remove the + watcher. This is already how it worked on kqueue and FEN. + + On Windows this does work, and remains working. + +- windows: don't listen for file attribute changes ([#520]) + + File attribute changes are sent as `FILE_ACTION_MODIFIED` by the Windows API, + with no way to see if they're a file write or attribute change, so would show + up as a fsnotify.Write event. This is never useful, and could result in many + spurious Write events. + +- windows: return `ErrEventOverflow` if the buffer is full ([#525]) + + Before it would merely return "short read", making it hard to detect this + error. 
+ +- kqueue: make sure events for all files are delivered properly when removing a + watched directory ([#526]) + + Previously they would get sent with `""` (empty string) or `"."` as the path + name. + +- kqueue: don't emit spurious Create events for symbolic links ([#524]) + + The link would get resolved but kqueue would "forget" it already saw the link + itself, resulting on a Create for every Write event for the directory. + +- all: return `ErrClosed` on `Add()` when the watcher is closed ([#516]) + +- other: add `Watcher.Errors` and `Watcher.Events` to the no-op `Watcher` in + `backend_other.go`, making it easier to use on unsupported platforms such as + WASM, AIX, etc. ([#528]) + +- other: use the `backend_other.go` no-op if the `appengine` build tag is set; + Google AppEngine forbids usage of the unsafe package so the inotify backend + won't compile there. -## [1.6.0] - 2022-10-13 +[#371]: https://github.com/fsnotify/fsnotify/pull/371 +[#516]: https://github.com/fsnotify/fsnotify/pull/516 +[#518]: https://github.com/fsnotify/fsnotify/pull/518 +[#520]: https://github.com/fsnotify/fsnotify/pull/520 +[#521]: https://github.com/fsnotify/fsnotify/pull/521 +[#524]: https://github.com/fsnotify/fsnotify/pull/524 +[#525]: https://github.com/fsnotify/fsnotify/pull/525 +[#526]: https://github.com/fsnotify/fsnotify/pull/526 +[#528]: https://github.com/fsnotify/fsnotify/pull/528 +[#537]: https://github.com/fsnotify/fsnotify/pull/537 +[#550]: https://github.com/fsnotify/fsnotify/pull/550 +[#572]: https://github.com/fsnotify/fsnotify/pull/572 +1.6.0 - 2022-10-13 +------------------ This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1, but not documented). It also increases the minimum Linux version to 2.6.32. diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md index d4e6080f..e480733d 100644 --- a/vendor/github.com/fsnotify/fsnotify/README.md +++ b/vendor/github.com/fsnotify/fsnotify/README.md @@ -1,29 +1,31 @@ fsnotify is a Go library to provide cross-platform filesystem notifications on -Windows, Linux, macOS, and BSD systems. +Windows, Linux, macOS, BSD, and illumos. -Go 1.16 or newer is required; the full documentation is at +Go 1.17 or newer is required; the full documentation is at https://pkg.go.dev/github.com/fsnotify/fsnotify -**It's best to read the documentation at pkg.go.dev, as it's pinned to the last -released version, whereas this README is for the last development version which -may include additions/changes.** - --- Platform support: -| Adapter | OS | Status | -| --------------------- | ---------------| -------------------------------------------------------------| -| inotify | Linux 2.6.32+ | Supported | -| kqueue | BSD, macOS | Supported | -| ReadDirectoryChangesW | Windows | Supported | -| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | -| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) | -| fanotify | Linux 5.9+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) | -| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | -| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | - -Linux and macOS should include Android and iOS, but these are currently untested. 
+| Backend | OS | Status | +| :-------------------- | :--------- | :------------------------------------------------------------------------ | +| inotify | Linux | Supported | +| kqueue | BSD, macOS | Supported | +| ReadDirectoryChangesW | Windows | Supported | +| FEN | illumos | Supported | +| fanotify | Linux 5.9+ | [Not yet](https://github.com/fsnotify/fsnotify/issues/114) | +| AHAFS | AIX | [aix branch]; experimental due to lack of maintainer and test environment | +| FSEvents | macOS | [Needs support in x/sys/unix][fsevents] | +| USN Journals | Windows | [Needs support in x/sys/windows][usn] | +| Polling | *All* | [Not yet](https://github.com/fsnotify/fsnotify/issues/9) | + +Linux and illumos should include Android and Solaris, but these are currently +untested. + +[fsevents]: https://github.com/fsnotify/fsnotify/issues/11#issuecomment-1279133120 +[usn]: https://github.com/fsnotify/fsnotify/issues/53#issuecomment-1279829847 +[aix branch]: https://github.com/fsnotify/fsnotify/issues/353#issuecomment-1284590129 Usage ----- @@ -83,20 +85,23 @@ run with: % go run ./cmd/fsnotify +Further detailed documentation can be found in godoc: +https://pkg.go.dev/github.com/fsnotify/fsnotify + FAQ --- ### Will a file still be watched when it's moved to another directory? No, not unless you are watching the location it was moved to. -### Are subdirectories watched too? +### Are subdirectories watched? No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap: [#18]). [#18]: https://github.com/fsnotify/fsnotify/issues/18 ### Do I have to watch the Error and Event channels in a goroutine? -As of now, yes (you can read both channels in the same goroutine using `select`, -you don't need a separate goroutine for both channels; see the example). +Yes. You can read both channels in the same goroutine using `select` (you don't +need a separate goroutine for both channels; see the example). ### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys? fsnotify requires support from underlying OS to work. The current NFS and SMB @@ -107,6 +112,32 @@ This could be fixed with a polling watcher ([#9]), but it's not yet implemented. [#9]: https://github.com/fsnotify/fsnotify/issues/9 +### Why do I get many Chmod events? +Some programs may generate a lot of attribute changes; for example Spotlight on +macOS, anti-virus programs, backup applications, and some others are known to do +this. As a rule, it's typically best to ignore Chmod events. They're often not +useful, and tend to cause problems. + +Spotlight indexing on macOS can result in multiple events (see [#15]). A +temporary workaround is to add your folder(s) to the *Spotlight Privacy +settings* until we have a native FSEvents implementation (see [#11]). + +[#11]: https://github.com/fsnotify/fsnotify/issues/11 +[#15]: https://github.com/fsnotify/fsnotify/issues/15 + +### Watching a file doesn't work well +Watching individual files (rather than directories) is generally not recommended +as many programs (especially editors) update files atomically: it will write to +a temporary file which is then moved to to destination, overwriting the original +(or some variant thereof). The watcher on the original file is now lost, as that +no longer exists. + +The upshot of this is that a power failure or crash won't leave a half-written +file. + +Watch the parent directory and use `Event.Name` to filter out files you're not +interested in. There is an example of this in `cmd/fsnotify/file.go`. 
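
The FAQ answer above ends with the recommended pattern: watch the parent directory and filter on Event.Name. A minimal sketch of that pattern against the public fsnotify API; the watched file path is a placeholder:

```go
package main

import (
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Watch the parent directory rather than the file itself, so an atomic
	// replace-by-rename (as editors do) doesn't silently drop the watch.
	target := "/tmp/app.conf"
	if err := w.Add(filepath.Dir(target)); err != nil {
		log.Fatal(err)
	}

	// Events and Errors can be read from the same goroutine with select.
	for {
		select {
		case ev, ok := <-w.Events:
			if !ok {
				return
			}
			if ev.Name == target && ev.Has(fsnotify.Write) {
				log.Println("modified:", ev.Name)
			}
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("error:", err)
		}
	}
}
```
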
+ Platform-specific notes ----------------------- ### Linux @@ -151,11 +182,3 @@ these platforms. The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to control the maximum number of open files. - -### macOS -Spotlight indexing on macOS can result in multiple events (see [#15]). A temporary -workaround is to add your folder(s) to the *Spotlight Privacy settings* until we -have a native FSEvents implementation (see [#11]). - -[#11]: https://github.com/fsnotify/fsnotify/issues/11 -[#15]: https://github.com/fsnotify/fsnotify/issues/15 diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go index 1a95ad8e..28497f1d 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_fen.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_fen.go @@ -1,10 +1,19 @@ //go:build solaris // +build solaris +// Note: the documentation on the Watcher type and methods is generated from +// mkdoc.zsh + package fsnotify import ( "errors" + "fmt" + "os" + "path/filepath" + "sync" + + "golang.org/x/sys/unix" ) // Watcher watches a set of paths, delivering events on a channel. @@ -17,9 +26,9 @@ import ( // When a file is removed a Remove event won't be emitted until all file // descriptors are closed, and deletes will always emit a Chmod. For example: // -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove // // This is the event that inotify sends, so not much can be changed about this. // @@ -33,16 +42,16 @@ import ( // // To increase them you can use sysctl or write the value to the /proc file: // -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 // // To make the changes persist on reboot edit /etc/sysctl.conf or // /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check // your distro's documentation): // -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 // // Reaching the limit will result in a "no space left on device" or "too many open // files" error. @@ -58,14 +67,20 @@ import ( // control the maximum number of open files, as well as /etc/login.conf on BSD // systems. // -// # macOS notes +// # Windows notes +// +// Paths can be added as "C:\path\to\dir", but forward slashes +// ("C:/path/to/dir") will also work. // -// Spotlight indexing on macOS can result in multiple events (see [#15]). A -// temporary workaround is to add your folder(s) to the "Spotlight Privacy -// Settings" until we have a native FSEvents implementation (see [#11]). +// When a watched directory is removed it will always send an event for the +// directory itself, but may not send events for all files in that directory. +// Sometimes it will send events for all times, sometimes it will send no +// events, and often only for some files. // -// [#11]: https://github.com/fsnotify/fsnotify/issues/11 -// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest +// value that is guaranteed to work with SMB filesystems. 
If you have many +// events in quick succession this may not be enough, and you will have to use +// [WithBufferSize] to increase the value. type Watcher struct { // Events sends the filesystem change events. // @@ -92,44 +107,129 @@ type Watcher struct { // initiated by the user may show up as one or multiple // writes, depending on when the system syncs things to // disk. For example when compiling a large Go program - // you may get hundreds of Write events, so you - // probably want to wait until you've stopped receiving - // them (see the dedup example in cmd/fsnotify). + // you may get hundreds of Write events, and you may + // want to wait until you've stopped receiving them + // (see the dedup example in cmd/fsnotify). + // + // Some systems may send Write event for directories + // when the directory content changes. // // fsnotify.Chmod Attributes were changed. On Linux this is also sent // when a file is removed (or more accurately, when a // link to an inode is removed). On kqueue it's sent - // and on kqueue when a file is truncated. On Windows - // it's never sent. + // when a file is truncated. On Windows it's never + // sent. Events chan Event // Errors sends any errors. + // + // ErrEventOverflow is used to indicate there are too many events: + // + // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) + // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. + // - kqueue, fen: Not used. Errors chan error + + mu sync.Mutex + port *unix.EventPort + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + dirs map[string]struct{} // Explicitly watched directories + watches map[string]struct{} // Explicitly watched non-directories } // NewWatcher creates a new Watcher. func NewWatcher() (*Watcher, error) { - return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") + return NewBufferedWatcher(0) } -// Close removes all watches and closes the events channel. +// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events +// channel. +// +// The main use case for this is situations with a very large number of events +// where the kernel buffer size can't be increased (e.g. due to lack of +// permissions). An unbuffered Watcher will perform better for almost all use +// cases, and whenever possible you will be better off increasing the kernel +// buffers instead of adding a large userspace buffer. +func NewBufferedWatcher(sz uint) (*Watcher, error) { + w := &Watcher{ + Events: make(chan Event, sz), + Errors: make(chan error), + dirs: make(map[string]struct{}), + watches: make(map[string]struct{}), + done: make(chan struct{}), + } + + var err error + w.port, err = unix.NewEventPort() + if err != nil { + return nil, fmt.Errorf("fsnotify.NewWatcher: %w", err) + } + + go w.readEvents() + return w, nil +} + +// sendEvent attempts to send an event to the user, returning true if the event +// was put in the channel successfully and false if the watcher has been closed. +func (w *Watcher) sendEvent(name string, op Op) (sent bool) { + select { + case w.Events <- Event{Name: name, Op: op}: + return true + case <-w.done: + return false + } +} + +// sendError attempts to send an error to the user, returning true if the error +// was put in the channel successfully and false if the watcher has been closed. 
+func (w *Watcher) sendError(err error) (sent bool) { + select { + case w.Errors <- err: + return true + case <-w.done: + return false + } +} + +func (w *Watcher) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +// Close removes all watches and closes the Events channel. func (w *Watcher) Close() error { - return nil + // Take the lock used by associateFile to prevent lingering events from + // being processed after the close + w.mu.Lock() + defer w.mu.Unlock() + if w.isClosed() { + return nil + } + close(w.done) + return w.port.Close() } // Add starts monitoring the path for changes. // -// A path can only be watched once; attempting to watch it more than once will -// return an error. Paths that do not yet exist on the filesystem cannot be -// added. A watch will be automatically removed if the path is deleted. +// A path can only be watched once; watching it more than once is a no-op and will +// not return an error. Paths that do not yet exist on the filesystem cannot be +// watched. // -// A path will remain watched if it gets renamed to somewhere else on the same -// filesystem, but the monitor will get removed if the path gets deleted and -// re-created, or if it's moved to a different filesystem. +// A watch will be automatically removed if the watched path is deleted or +// renamed. The exception is the Windows backend, which doesn't remove the +// watcher on renames. // // Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special // filesystems (/proc, /sys, etc.) generally don't work. // +// Returns [ErrClosed] if [Watcher.Close] was called. +// +// See [Watcher.AddWith] for a version that allows adding options. +// // # Watching directories // // All files in a directory are monitored, including new files that are created @@ -139,15 +239,63 @@ func (w *Watcher) Close() error { // # Watching files // // Watching individual files (rather than directories) is generally not -// recommended as many tools update files atomically. Instead of "just" writing -// to the file a temporary file will be written to first, and if successful the -// temporary file is moved to to destination removing the original, or some -// variant thereof. The watcher on the original file is now lost, as it no -// longer exists. -// -// Instead, watch the parent directory and use Event.Name to filter out files -// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. -func (w *Watcher) Add(name string) error { +// recommended as many programs (especially editors) update files atomically: it +// will write to a temporary file which is then moved to to destination, +// overwriting the original (or some variant thereof). The watcher on the +// original file is now lost, as that no longer exists. +// +// The upshot of this is that a power failure or crash won't leave a +// half-written file. +// +// Watch the parent directory and use Event.Name to filter out files you're not +// interested in. There is an example of this in cmd/fsnotify/file.go. +func (w *Watcher) Add(name string) error { return w.AddWith(name) } + +// AddWith is like [Watcher.Add], but allows adding options. When using Add() +// the defaults described below are used. +// +// Possible options are: +// +// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on +// other platforms. The default is 64K (65536 bytes). 
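
AddWith and the option plumbing are new in fsnotify 1.7.0, together with NewBufferedWatcher. A hedged sketch of the two new entry points; the buffer sizes are arbitrary examples, and WithBufferSize only has an effect on the Windows backend:

```go
package main

import (
	"log"
	"os"

	"github.com/fsnotify/fsnotify"
)

func main() {
	// NewBufferedWatcher buffers the Events channel for bursty workloads
	// where the kernel-side buffers can't be raised.
	w, err := fsnotify.NewBufferedWatcher(4096)
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// AddWith is Add plus options; WithBufferSize sizes the
	// ReadDirectoryChangesW buffer on Windows and is a no-op elsewhere.
	if err := w.AddWith(os.TempDir(), fsnotify.WithBufferSize(256*1024)); err != nil {
		log.Fatal(err)
	}

	for ev := range w.Events {
		log.Println(ev)
	}
}
```

Per the documentation above, an unbuffered watcher performs better for almost all workloads; raising the kernel buffers is preferable whenever permissions allow it.
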
+func (w *Watcher) AddWith(name string, opts ...addOpt) error { + if w.isClosed() { + return ErrClosed + } + if w.port.PathIsWatched(name) { + return nil + } + + _ = getOptions(opts...) + + // Currently we resolve symlinks that were explicitly requested to be + // watched. Otherwise we would use LStat here. + stat, err := os.Stat(name) + if err != nil { + return err + } + + // Associate all files in the directory. + if stat.IsDir() { + err := w.handleDirectory(name, stat, true, w.associateFile) + if err != nil { + return err + } + + w.mu.Lock() + w.dirs[name] = struct{}{} + w.mu.Unlock() + return nil + } + + err = w.associateFile(name, stat, true) + if err != nil { + return err + } + + w.mu.Lock() + w.watches[name] = struct{}{} + w.mu.Unlock() return nil } @@ -157,6 +305,336 @@ func (w *Watcher) Add(name string) error { // /tmp/dir and /tmp/dir/subdir then you will need to remove both. // // Removing a path that has not yet been added returns [ErrNonExistentWatch]. +// +// Returns nil if [Watcher.Close] was called. func (w *Watcher) Remove(name string) error { + if w.isClosed() { + return nil + } + if !w.port.PathIsWatched(name) { + return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) + } + + // The user has expressed an intent. Immediately remove this name from + // whichever watch list it might be in. If it's not in there the delete + // doesn't cause harm. + w.mu.Lock() + delete(w.watches, name) + delete(w.dirs, name) + w.mu.Unlock() + + stat, err := os.Stat(name) + if err != nil { + return err + } + + // Remove associations for every file in the directory. + if stat.IsDir() { + err := w.handleDirectory(name, stat, false, w.dissociateFile) + if err != nil { + return err + } + return nil + } + + err = w.port.DissociatePath(name) + if err != nil { + return err + } + return nil } + +// readEvents contains the main loop that runs in a goroutine watching for events. +func (w *Watcher) readEvents() { + // If this function returns, the watcher has been closed and we can close + // these channels + defer func() { + close(w.Errors) + close(w.Events) + }() + + pevents := make([]unix.PortEvent, 8) + for { + count, err := w.port.Get(pevents, 1, nil) + if err != nil && err != unix.ETIME { + // Interrupted system call (count should be 0) ignore and continue + if errors.Is(err, unix.EINTR) && count == 0 { + continue + } + // Get failed because we called w.Close() + if errors.Is(err, unix.EBADF) && w.isClosed() { + return + } + // There was an error not caused by calling w.Close() + if !w.sendError(err) { + return + } + } + + p := pevents[:count] + for _, pevent := range p { + if pevent.Source != unix.PORT_SOURCE_FILE { + // Event from unexpected source received; should never happen. + if !w.sendError(errors.New("Event from unexpected source received")) { + return + } + continue + } + + err = w.handleEvent(&pevent) + if err != nil { + if !w.sendError(err) { + return + } + } + } + } +} + +func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { + files, err := os.ReadDir(path) + if err != nil { + return err + } + + // Handle all children of the directory. + for _, entry := range files { + finfo, err := entry.Info() + if err != nil { + return err + } + err = handler(filepath.Join(path, finfo.Name()), finfo, false) + if err != nil { + return err + } + } + + // And finally handle the directory itself. 
+ return handler(path, stat, follow) +} + +// handleEvent might need to emit more than one fsnotify event if the events +// bitmap matches more than one event type (e.g. the file was both modified and +// had the attributes changed between when the association was created and the +// when event was returned) +func (w *Watcher) handleEvent(event *unix.PortEvent) error { + var ( + events = event.Events + path = event.Path + fmode = event.Cookie.(os.FileMode) + reRegister = true + ) + + w.mu.Lock() + _, watchedDir := w.dirs[path] + _, watchedPath := w.watches[path] + w.mu.Unlock() + isWatched := watchedDir || watchedPath + + if events&unix.FILE_DELETE != 0 { + if !w.sendEvent(path, Remove) { + return nil + } + reRegister = false + } + if events&unix.FILE_RENAME_FROM != 0 { + if !w.sendEvent(path, Rename) { + return nil + } + // Don't keep watching the new file name + reRegister = false + } + if events&unix.FILE_RENAME_TO != 0 { + // We don't report a Rename event for this case, because Rename events + // are interpreted as referring to the _old_ name of the file, and in + // this case the event would refer to the new name of the file. This + // type of rename event is not supported by fsnotify. + + // inotify reports a Remove event in this case, so we simulate this + // here. + if !w.sendEvent(path, Remove) { + return nil + } + // Don't keep watching the file that was removed + reRegister = false + } + + // The file is gone, nothing left to do. + if !reRegister { + if watchedDir { + w.mu.Lock() + delete(w.dirs, path) + w.mu.Unlock() + } + if watchedPath { + w.mu.Lock() + delete(w.watches, path) + w.mu.Unlock() + } + return nil + } + + // If we didn't get a deletion the file still exists and we're going to have + // to watch it again. Let's Stat it now so that we can compare permissions + // and have what we need to continue watching the file + + stat, err := os.Lstat(path) + if err != nil { + // This is unexpected, but we should still emit an event. This happens + // most often on "rm -r" of a subdirectory inside a watched directory We + // get a modify event of something happening inside, but by the time we + // get here, the sudirectory is already gone. Clearly we were watching + // this path but now it is gone. Let's tell the user that it was + // removed. + if !w.sendEvent(path, Remove) { + return nil + } + // Suppress extra write events on removed directories; they are not + // informative and can be confusing. + return nil + } + + // resolve symlinks that were explicitly watched as we would have at Add() + // time. this helps suppress spurious Chmod events on watched symlinks + if isWatched { + stat, err = os.Stat(path) + if err != nil { + // The symlink still exists, but the target is gone. Report the + // Remove similar to above. 
+ if !w.sendEvent(path, Remove) { + return nil + } + // Don't return the error + } + } + + if events&unix.FILE_MODIFIED != 0 { + if fmode.IsDir() { + if watchedDir { + if err := w.updateDirectory(path); err != nil { + return err + } + } else { + if !w.sendEvent(path, Write) { + return nil + } + } + } else { + if !w.sendEvent(path, Write) { + return nil + } + } + } + if events&unix.FILE_ATTRIB != 0 && stat != nil { + // Only send Chmod if perms changed + if stat.Mode().Perm() != fmode.Perm() { + if !w.sendEvent(path, Chmod) { + return nil + } + } + } + + if stat != nil { + // If we get here, it means we've hit an event above that requires us to + // continue watching the file or directory + return w.associateFile(path, stat, isWatched) + } + return nil +} + +func (w *Watcher) updateDirectory(path string) error { + // The directory was modified, so we must find unwatched entities and watch + // them. If something was removed from the directory, nothing will happen, + // as everything else should still be watched. + files, err := os.ReadDir(path) + if err != nil { + return err + } + + for _, entry := range files { + path := filepath.Join(path, entry.Name()) + if w.port.PathIsWatched(path) { + continue + } + + finfo, err := entry.Info() + if err != nil { + return err + } + err = w.associateFile(path, finfo, false) + if err != nil { + if !w.sendError(err) { + return nil + } + } + if !w.sendEvent(path, Create) { + return nil + } + } + return nil +} + +func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) error { + if w.isClosed() { + return ErrClosed + } + // This is primarily protecting the call to AssociatePath but it is + // important and intentional that the call to PathIsWatched is also + // protected by this mutex. Without this mutex, AssociatePath has been seen + // to error out that the path is already associated. + w.mu.Lock() + defer w.mu.Unlock() + + if w.port.PathIsWatched(path) { + // Remove the old association in favor of this one If we get ENOENT, + // then while the x/sys/unix wrapper still thought that this path was + // associated, the underlying event port did not. This call will have + // cleared up that discrepancy. The most likely cause is that the event + // has fired but we haven't processed it yet. + err := w.port.DissociatePath(path) + if err != nil && err != unix.ENOENT { + return err + } + } + // FILE_NOFOLLOW means we watch symlinks themselves rather than their + // targets. + events := unix.FILE_MODIFIED | unix.FILE_ATTRIB | unix.FILE_NOFOLLOW + if follow { + // We *DO* follow symlinks for explicitly watched entries. + events = unix.FILE_MODIFIED | unix.FILE_ATTRIB + } + return w.port.AssociatePath(path, stat, + events, + stat.Mode()) +} + +func (w *Watcher) dissociateFile(path string, stat os.FileInfo, unused bool) error { + if !w.port.PathIsWatched(path) { + return nil + } + return w.port.DissociatePath(path) +} + +// WatchList returns all paths explicitly added with [Watcher.Add] (and are not +// yet removed). +// +// Returns nil if [Watcher.Close] was called. 
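
As the Remove and WatchList documentation above notes, watches are strictly per-path and have to be removed individually. A small, hedged sketch of enumerating and tearing down watches explicitly; the watched directory is just os.TempDir() for illustration:

```go
package main

import (
	"log"
	"os"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := w.Add(os.TempDir()); err != nil {
		log.Fatal(err)
	}

	// WatchList reports everything explicitly added and not yet removed;
	// each entry has to be removed on its own (a directory watch does not
	// cover a separately added subdirectory watch).
	for _, p := range w.WatchList() {
		if err := w.Remove(p); err != nil {
			log.Println("remove:", p, err)
		}
	}
}
```
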
+func (w *Watcher) WatchList() []string { + if w.isClosed() { + return nil + } + + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.watches)+len(w.dirs)) + for pathname := range w.dirs { + entries = append(entries, pathname) + } + for pathname := range w.watches { + entries = append(entries, pathname) + } + + return entries +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go index 54c77fbb..921c1c1e 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go @@ -1,5 +1,8 @@ -//go:build linux -// +build linux +//go:build linux && !appengine +// +build linux,!appengine + +// Note: the documentation on the Watcher type and methods is generated from +// mkdoc.zsh package fsnotify @@ -26,9 +29,9 @@ import ( // When a file is removed a Remove event won't be emitted until all file // descriptors are closed, and deletes will always emit a Chmod. For example: // -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove // // This is the event that inotify sends, so not much can be changed about this. // @@ -42,16 +45,16 @@ import ( // // To increase them you can use sysctl or write the value to the /proc file: // -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 // // To make the changes persist on reboot edit /etc/sysctl.conf or // /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check // your distro's documentation): // -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 // // Reaching the limit will result in a "no space left on device" or "too many open // files" error. @@ -67,14 +70,20 @@ import ( // control the maximum number of open files, as well as /etc/login.conf on BSD // systems. // -// # macOS notes +// # Windows notes +// +// Paths can be added as "C:\path\to\dir", but forward slashes +// ("C:/path/to/dir") will also work. // -// Spotlight indexing on macOS can result in multiple events (see [#15]). A -// temporary workaround is to add your folder(s) to the "Spotlight Privacy -// Settings" until we have a native FSEvents implementation (see [#11]). +// When a watched directory is removed it will always send an event for the +// directory itself, but may not send events for all files in that directory. +// Sometimes it will send events for all times, sometimes it will send no +// events, and often only for some files. // -// [#11]: https://github.com/fsnotify/fsnotify/issues/11 -// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest +// value that is guaranteed to work with SMB filesystems. If you have many +// events in quick succession this may not be enough, and you will have to use +// [WithBufferSize] to increase the value. type Watcher struct { // Events sends the filesystem change events. // @@ -101,36 +110,148 @@ type Watcher struct { // initiated by the user may show up as one or multiple // writes, depending on when the system syncs things to // disk. 
For example when compiling a large Go program - // you may get hundreds of Write events, so you - // probably want to wait until you've stopped receiving - // them (see the dedup example in cmd/fsnotify). + // you may get hundreds of Write events, and you may + // want to wait until you've stopped receiving them + // (see the dedup example in cmd/fsnotify). + // + // Some systems may send Write event for directories + // when the directory content changes. // // fsnotify.Chmod Attributes were changed. On Linux this is also sent // when a file is removed (or more accurately, when a // link to an inode is removed). On kqueue it's sent - // and on kqueue when a file is truncated. On Windows - // it's never sent. + // when a file is truncated. On Windows it's never + // sent. Events chan Event // Errors sends any errors. + // + // ErrEventOverflow is used to indicate there are too many events: + // + // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) + // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. + // - kqueue, fen: Not used. Errors chan error // Store fd here as os.File.Read() will no longer return on close after // calling Fd(). See: https://github.com/golang/go/issues/26439 fd int - mu sync.Mutex // Map access inotifyFile *os.File - watches map[string]*watch // Map of inotify watches (key: path) - paths map[int]string // Map of watched paths (key: watch descriptor) - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - doneResp chan struct{} // Channel to respond to Close + watches *watches + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + closeMu sync.Mutex + doneResp chan struct{} // Channel to respond to Close +} + +type ( + watches struct { + mu sync.RWMutex + wd map[uint32]*watch // wd → watch + path map[string]uint32 // pathname → wd + } + watch struct { + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) + path string // Watch path. + } +) + +func newWatches() *watches { + return &watches{ + wd: make(map[uint32]*watch), + path: make(map[string]uint32), + } +} + +func (w *watches) len() int { + w.mu.RLock() + defer w.mu.RUnlock() + return len(w.wd) +} + +func (w *watches) add(ww *watch) { + w.mu.Lock() + defer w.mu.Unlock() + w.wd[ww.wd] = ww + w.path[ww.path] = ww.wd +} + +func (w *watches) remove(wd uint32) { + w.mu.Lock() + defer w.mu.Unlock() + delete(w.path, w.wd[wd].path) + delete(w.wd, wd) +} + +func (w *watches) removePath(path string) (uint32, bool) { + w.mu.Lock() + defer w.mu.Unlock() + + wd, ok := w.path[path] + if !ok { + return 0, false + } + + delete(w.path, path) + delete(w.wd, wd) + + return wd, true +} + +func (w *watches) byPath(path string) *watch { + w.mu.RLock() + defer w.mu.RUnlock() + return w.wd[w.path[path]] +} + +func (w *watches) byWd(wd uint32) *watch { + w.mu.RLock() + defer w.mu.RUnlock() + return w.wd[wd] +} + +func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error { + w.mu.Lock() + defer w.mu.Unlock() + + var existing *watch + wd, ok := w.path[path] + if ok { + existing = w.wd[wd] + } + + upd, err := f(existing) + if err != nil { + return err + } + if upd != nil { + w.wd[upd.wd] = upd + w.path[upd.path] = upd.wd + + if upd.wd != wd { + delete(w.wd, wd) + } + } + + return nil } // NewWatcher creates a new Watcher. 
func NewWatcher() (*Watcher, error) { - // Create inotify fd - // Need to set the FD to nonblocking mode in order for SetDeadline methods to work - // Otherwise, blocking i/o operations won't terminate on close + return NewBufferedWatcher(0) +} + +// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events +// channel. +// +// The main use case for this is situations with a very large number of events +// where the kernel buffer size can't be increased (e.g. due to lack of +// permissions). An unbuffered Watcher will perform better for almost all use +// cases, and whenever possible you will be better off increasing the kernel +// buffers instead of adding a large userspace buffer. +func NewBufferedWatcher(sz uint) (*Watcher, error) { + // Need to set nonblocking mode for SetDeadline to work, otherwise blocking + // I/O operations won't terminate on close. fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) if fd == -1 { return nil, errno @@ -139,9 +260,8 @@ func NewWatcher() (*Watcher, error) { w := &Watcher{ fd: fd, inotifyFile: os.NewFile(uintptr(fd), ""), - watches: make(map[string]*watch), - paths: make(map[int]string), - Events: make(chan Event), + watches: newWatches(), + Events: make(chan Event, sz), Errors: make(chan error), done: make(chan struct{}), doneResp: make(chan struct{}), @@ -157,8 +277,8 @@ func (w *Watcher) sendEvent(e Event) bool { case w.Events <- e: return true case <-w.done: + return false } - return false } // Returns true if the error was sent, or false if watcher is closed. @@ -180,17 +300,15 @@ func (w *Watcher) isClosed() bool { } } -// Close removes all watches and closes the events channel. +// Close removes all watches and closes the Events channel. func (w *Watcher) Close() error { - w.mu.Lock() + w.closeMu.Lock() if w.isClosed() { - w.mu.Unlock() + w.closeMu.Unlock() return nil } - - // Send 'close' signal to goroutine, and set the Watcher to closed. close(w.done) - w.mu.Unlock() + w.closeMu.Unlock() // Causes any blocking reads to return with an error, provided the file // still supports deadline operations. @@ -207,17 +325,21 @@ func (w *Watcher) Close() error { // Add starts monitoring the path for changes. // -// A path can only be watched once; attempting to watch it more than once will -// return an error. Paths that do not yet exist on the filesystem cannot be -// added. A watch will be automatically removed if the path is deleted. +// A path can only be watched once; watching it more than once is a no-op and will +// not return an error. Paths that do not yet exist on the filesystem cannot be +// watched. // -// A path will remain watched if it gets renamed to somewhere else on the same -// filesystem, but the monitor will get removed if the path gets deleted and -// re-created, or if it's moved to a different filesystem. +// A watch will be automatically removed if the watched path is deleted or +// renamed. The exception is the Windows backend, which doesn't remove the +// watcher on renames. // // Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special // filesystems (/proc, /sys, etc.) generally don't work. // +// Returns [ErrClosed] if [Watcher.Close] was called. +// +// See [Watcher.AddWith] for a version that allows adding options. 
+// // # Watching directories // // All files in a directory are monitored, including new files that are created @@ -227,44 +349,59 @@ func (w *Watcher) Close() error { // # Watching files // // Watching individual files (rather than directories) is generally not -// recommended as many tools update files atomically. Instead of "just" writing -// to the file a temporary file will be written to first, and if successful the -// temporary file is moved to to destination removing the original, or some -// variant thereof. The watcher on the original file is now lost, as it no -// longer exists. -// -// Instead, watch the parent directory and use Event.Name to filter out files -// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. -func (w *Watcher) Add(name string) error { - name = filepath.Clean(name) +// recommended as many programs (especially editors) update files atomically: it +// will write to a temporary file which is then moved to to destination, +// overwriting the original (or some variant thereof). The watcher on the +// original file is now lost, as that no longer exists. +// +// The upshot of this is that a power failure or crash won't leave a +// half-written file. +// +// Watch the parent directory and use Event.Name to filter out files you're not +// interested in. There is an example of this in cmd/fsnotify/file.go. +func (w *Watcher) Add(name string) error { return w.AddWith(name) } + +// AddWith is like [Watcher.Add], but allows adding options. When using Add() +// the defaults described below are used. +// +// Possible options are: +// +// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on +// other platforms. The default is 64K (65536 bytes). +func (w *Watcher) AddWith(name string, opts ...addOpt) error { if w.isClosed() { - return errors.New("inotify instance already closed") + return ErrClosed } + name = filepath.Clean(name) + _ = getOptions(opts...) + var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF - w.mu.Lock() - defer w.mu.Unlock() - watchEntry := w.watches[name] - if watchEntry != nil { - flags |= watchEntry.flags | unix.IN_MASK_ADD - } - wd, errno := unix.InotifyAddWatch(w.fd, name, flags) - if wd == -1 { - return errno - } + return w.watches.updatePath(name, func(existing *watch) (*watch, error) { + if existing != nil { + flags |= existing.flags | unix.IN_MASK_ADD + } - if watchEntry == nil { - w.watches[name] = &watch{wd: uint32(wd), flags: flags} - w.paths[wd] = name - } else { - watchEntry.wd = uint32(wd) - watchEntry.flags = flags - } + wd, err := unix.InotifyAddWatch(w.fd, name, flags) + if wd == -1 { + return nil, err + } - return nil + if existing == nil { + return &watch{ + wd: uint32(wd), + path: name, + flags: flags, + }, nil + } + + existing.wd = uint32(wd) + existing.flags = flags + return existing, nil + }) } // Remove stops monitoring the path for changes. @@ -273,32 +410,22 @@ func (w *Watcher) Add(name string) error { // /tmp/dir and /tmp/dir/subdir then you will need to remove both. // // Removing a path that has not yet been added returns [ErrNonExistentWatch]. +// +// Returns nil if [Watcher.Close] was called. func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - - // Fetch the watch. 
- w.mu.Lock() - defer w.mu.Unlock() - watch, ok := w.watches[name] + if w.isClosed() { + return nil + } + return w.remove(filepath.Clean(name)) +} - // Remove it from inotify. +func (w *Watcher) remove(name string) error { + wd, ok := w.watches.removePath(name) if !ok { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } - // We successfully removed the watch if InotifyRmWatch doesn't return an - // error, we need to clean up our internal state to ensure it matches - // inotify's kernel state. - delete(w.paths, int(watch.wd)) - delete(w.watches, name) - - // inotify_rm_watch will return EINVAL if the file has been deleted; - // the inotify will already have been removed. - // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously - // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE - // so that EINVAL means that the wd is being rm_watch()ed or its file removed - // by another thread and we have not received IN_IGNORE event. - success, errno := unix.InotifyRmWatch(w.fd, watch.wd) + success, errno := unix.InotifyRmWatch(w.fd, wd) if success == -1 { // TODO: Perhaps it's not helpful to return an error here in every case; // The only two possible errors are: @@ -312,28 +439,28 @@ func (w *Watcher) Remove(name string) error { // are watching is deleted. return errno } - return nil } -// WatchList returns all paths added with [Add] (and are not yet removed). +// WatchList returns all paths explicitly added with [Watcher.Add] (and are not +// yet removed). +// +// Returns nil if [Watcher.Close] was called. func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() + if w.isClosed() { + return nil + } - entries := make([]string, 0, len(w.watches)) - for pathname := range w.watches { + entries := make([]string, 0, w.watches.len()) + w.watches.mu.RLock() + for pathname := range w.watches.path { entries = append(entries, pathname) } + w.watches.mu.RUnlock() return entries } -type watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) -} - // readEvents reads from the inotify file descriptor, converts the // received events into Event objects and sends them via the Events channel func (w *Watcher) readEvents() { @@ -367,14 +494,11 @@ func (w *Watcher) readEvents() { if n < unix.SizeofInotifyEvent { var err error if n == 0 { - // If EOF is received. This should really never happen. - err = io.EOF + err = io.EOF // If EOF is received. This should really never happen. } else if n < 0 { - // If an error occurred while reading. - err = errno + err = errno // If an error occurred while reading. } else { - // Read was too short. - err = errors.New("notify: short read in readEvents()") + err = errors.New("notify: short read in readEvents()") // Read was too short. } if !w.sendError(err) { return @@ -403,18 +527,29 @@ func (w *Watcher) readEvents() { // doesn't append the filename to the event, but we would like to always fill the // the "Name" field with a valid filename. We retrieve the path of the watch from // the "paths" map. - w.mu.Lock() - name, ok := w.paths[int(raw.Wd)] - // IN_DELETE_SELF occurs when the file/directory being watched is removed. - // This is a sign to clean up the maps, otherwise we are no longer in sync - // with the inotify kernel state which has already deleted the watch - // automatically. 
- if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { - delete(w.paths, int(raw.Wd)) - delete(w.watches, name) + watch := w.watches.byWd(uint32(raw.Wd)) + + // inotify will automatically remove the watch on deletes; just need + // to clean our state here. + if watch != nil && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + w.watches.remove(watch.wd) + } + // We can't really update the state when a watched path is moved; + // only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove + // the watch. + if watch != nil && mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + err := w.remove(watch.path) + if err != nil && !errors.Is(err, ErrNonExistentWatch) { + if !w.sendError(err) { + return + } + } } - w.mu.Unlock() + var name string + if watch != nil { + name = watch.path + } if nameLen > 0 { // Point "bytes" at the first byte of the filename bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go index 29087469..063a0915 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go @@ -1,12 +1,14 @@ //go:build freebsd || openbsd || netbsd || dragonfly || darwin // +build freebsd openbsd netbsd dragonfly darwin +// Note: the documentation on the Watcher type and methods is generated from +// mkdoc.zsh + package fsnotify import ( "errors" "fmt" - "io/ioutil" "os" "path/filepath" "sync" @@ -24,9 +26,9 @@ import ( // When a file is removed a Remove event won't be emitted until all file // descriptors are closed, and deletes will always emit a Chmod. For example: // -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove // // This is the event that inotify sends, so not much can be changed about this. // @@ -40,16 +42,16 @@ import ( // // To increase them you can use sysctl or write the value to the /proc file: // -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 // // To make the changes persist on reboot edit /etc/sysctl.conf or // /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check // your distro's documentation): // -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 // // Reaching the limit will result in a "no space left on device" or "too many open // files" error. @@ -65,14 +67,20 @@ import ( // control the maximum number of open files, as well as /etc/login.conf on BSD // systems. // -// # macOS notes +// # Windows notes +// +// Paths can be added as "C:\path\to\dir", but forward slashes +// ("C:/path/to/dir") will also work. // -// Spotlight indexing on macOS can result in multiple events (see [#15]). A -// temporary workaround is to add your folder(s) to the "Spotlight Privacy -// Settings" until we have a native FSEvents implementation (see [#11]). +// When a watched directory is removed it will always send an event for the +// directory itself, but may not send events for all files in that directory. 
+// Sometimes it will send events for all times, sometimes it will send no +// events, and often only for some files. // -// [#11]: https://github.com/fsnotify/fsnotify/issues/11 -// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest +// value that is guaranteed to work with SMB filesystems. If you have many +// events in quick succession this may not be enough, and you will have to use +// [WithBufferSize] to increase the value. type Watcher struct { // Events sends the filesystem change events. // @@ -99,18 +107,27 @@ type Watcher struct { // initiated by the user may show up as one or multiple // writes, depending on when the system syncs things to // disk. For example when compiling a large Go program - // you may get hundreds of Write events, so you - // probably want to wait until you've stopped receiving - // them (see the dedup example in cmd/fsnotify). + // you may get hundreds of Write events, and you may + // want to wait until you've stopped receiving them + // (see the dedup example in cmd/fsnotify). + // + // Some systems may send Write event for directories + // when the directory content changes. // // fsnotify.Chmod Attributes were changed. On Linux this is also sent // when a file is removed (or more accurately, when a // link to an inode is removed). On kqueue it's sent - // and on kqueue when a file is truncated. On Windows - // it's never sent. + // when a file is truncated. On Windows it's never + // sent. Events chan Event // Errors sends any errors. + // + // ErrEventOverflow is used to indicate there are too many events: + // + // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) + // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. + // - kqueue, fen: Not used. Errors chan error done chan struct{} @@ -133,6 +150,18 @@ type pathInfo struct { // NewWatcher creates a new Watcher. func NewWatcher() (*Watcher, error) { + return NewBufferedWatcher(0) +} + +// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events +// channel. +// +// The main use case for this is situations with a very large number of events +// where the kernel buffer size can't be increased (e.g. due to lack of +// permissions). An unbuffered Watcher will perform better for almost all use +// cases, and whenever possible you will be better off increasing the kernel +// buffers instead of adding a large userspace buffer. +func NewBufferedWatcher(sz uint) (*Watcher, error) { kq, closepipe, err := newKqueue() if err != nil { return nil, err @@ -147,7 +176,7 @@ func NewWatcher() (*Watcher, error) { paths: make(map[int]pathInfo), fileExists: make(map[string]struct{}), userWatches: make(map[string]struct{}), - Events: make(chan Event), + Events: make(chan Event, sz), Errors: make(chan error), done: make(chan struct{}), } @@ -197,8 +226,8 @@ func (w *Watcher) sendEvent(e Event) bool { case w.Events <- e: return true case <-w.done: + return false } - return false } // Returns true if the error was sent, or false if watcher is closed. @@ -207,11 +236,11 @@ func (w *Watcher) sendError(err error) bool { case w.Errors <- err: return true case <-w.done: + return false } - return false } -// Close removes all watches and closes the events channel. +// Close removes all watches and closes the Events channel. 
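For reference (not part of this vendored patch), a minimal consumer sketch of the buffered constructor and the closed-channel behaviour described in the comments above; the path and buffer size are illustrative assumptions, not values taken from the patch:

package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	// Small userspace buffer; the unbuffered NewWatcher() is usually preferable.
	w, err := fsnotify.NewBufferedWatcher(64)
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := w.Add("/tmp"); err != nil { // illustrative path
		log.Fatal(err)
	}

	for {
		select {
		case ev, ok := <-w.Events:
			if !ok { // Close() closes the Events channel.
				return
			}
			log.Println("event:", ev)
		case err, ok := <-w.Errors:
			if !ok { // Errors is closed together with Events.
				return
			}
			log.Println("error:", err)
		}
	}
}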
func (w *Watcher) Close() error { w.mu.Lock() if w.isClosed { @@ -239,17 +268,21 @@ func (w *Watcher) Close() error { // Add starts monitoring the path for changes. // -// A path can only be watched once; attempting to watch it more than once will -// return an error. Paths that do not yet exist on the filesystem cannot be -// added. A watch will be automatically removed if the path is deleted. +// A path can only be watched once; watching it more than once is a no-op and will +// not return an error. Paths that do not yet exist on the filesystem cannot be +// watched. // -// A path will remain watched if it gets renamed to somewhere else on the same -// filesystem, but the monitor will get removed if the path gets deleted and -// re-created, or if it's moved to a different filesystem. +// A watch will be automatically removed if the watched path is deleted or +// renamed. The exception is the Windows backend, which doesn't remove the +// watcher on renames. // // Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special // filesystems (/proc, /sys, etc.) generally don't work. // +// Returns [ErrClosed] if [Watcher.Close] was called. +// +// See [Watcher.AddWith] for a version that allows adding options. +// // # Watching directories // // All files in a directory are monitored, including new files that are created @@ -259,15 +292,28 @@ func (w *Watcher) Close() error { // # Watching files // // Watching individual files (rather than directories) is generally not -// recommended as many tools update files atomically. Instead of "just" writing -// to the file a temporary file will be written to first, and if successful the -// temporary file is moved to to destination removing the original, or some -// variant thereof. The watcher on the original file is now lost, as it no -// longer exists. -// -// Instead, watch the parent directory and use Event.Name to filter out files -// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. -func (w *Watcher) Add(name string) error { +// recommended as many programs (especially editors) update files atomically: it +// will write to a temporary file which is then moved to to destination, +// overwriting the original (or some variant thereof). The watcher on the +// original file is now lost, as that no longer exists. +// +// The upshot of this is that a power failure or crash won't leave a +// half-written file. +// +// Watch the parent directory and use Event.Name to filter out files you're not +// interested in. There is an example of this in cmd/fsnotify/file.go. +func (w *Watcher) Add(name string) error { return w.AddWith(name) } + +// AddWith is like [Watcher.Add], but allows adding options. When using Add() +// the defaults described below are used. +// +// Possible options are: +// +// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on +// other platforms. The default is 64K (65536 bytes). +func (w *Watcher) AddWith(name string, opts ...addOpt) error { + _ = getOptions(opts...) + w.mu.Lock() w.userWatches[name] = struct{}{} w.mu.Unlock() @@ -281,9 +327,19 @@ func (w *Watcher) Add(name string) error { // /tmp/dir and /tmp/dir/subdir then you will need to remove both. // // Removing a path that has not yet been added returns [ErrNonExistentWatch]. +// +// Returns nil if [Watcher.Close] was called. 
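A hedged sketch (again not part of the patch) of the Remove semantics documented above: removing a never-added path is reported via ErrNonExistentWatch, while Remove after Close is a no-op; the path is an illustrative assumption:

package main

import (
	"errors"
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}

	// Never added, so Remove reports ErrNonExistentWatch.
	if err := w.Remove("/tmp/never-added"); errors.Is(err, fsnotify.ErrNonExistentWatch) {
		log.Println("not watched:", err)
	}

	w.Close()

	// After Close, Remove returns nil rather than an error.
	if err := w.Remove("/tmp/never-added"); err == nil {
		log.Println("remove after close is a no-op")
	}
}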
func (w *Watcher) Remove(name string) error { + return w.remove(name, true) +} + +func (w *Watcher) remove(name string, unwatchFiles bool) error { name = filepath.Clean(name) w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return nil + } watchfd, ok := w.watches[name] w.mu.Unlock() if !ok { @@ -315,7 +371,7 @@ func (w *Watcher) Remove(name string) error { w.mu.Unlock() // Find all watched paths that are in this directory that are not external. - if isDir { + if unwatchFiles && isDir { var pathsToRemove []string w.mu.Lock() for fd := range w.watchesByDir[name] { @@ -326,20 +382,25 @@ func (w *Watcher) Remove(name string) error { } w.mu.Unlock() for _, name := range pathsToRemove { - // Since these are internal, not much sense in propagating error - // to the user, as that will just confuse them with an error about - // a path they did not explicitly watch themselves. + // Since these are internal, not much sense in propagating error to + // the user, as that will just confuse them with an error about a + // path they did not explicitly watch themselves. w.Remove(name) } } - return nil } -// WatchList returns all paths added with [Add] (and are not yet removed). +// WatchList returns all paths explicitly added with [Watcher.Add] (and are not +// yet removed). +// +// Returns nil if [Watcher.Close] was called. func (w *Watcher) WatchList() []string { w.mu.Lock() defer w.mu.Unlock() + if w.isClosed { + return nil + } entries := make([]string, 0, len(w.userWatches)) for pathname := range w.userWatches { @@ -352,18 +413,18 @@ func (w *Watcher) WatchList() []string { // Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME -// addWatch adds name to the watched file set. -// The flags are interpreted as described in kevent(2). -// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. +// addWatch adds name to the watched file set; the flags are interpreted as +// described in kevent(2). +// +// Returns the real path to the file which was added, with symlinks resolved. func (w *Watcher) addWatch(name string, flags uint32) (string, error) { var isDir bool - // Make ./name and name equivalent name = filepath.Clean(name) w.mu.Lock() if w.isClosed { w.mu.Unlock() - return "", errors.New("kevent instance already closed") + return "", ErrClosed } watchfd, alreadyWatching := w.watches[name] // We already have a watch, but we can still override flags. @@ -383,27 +444,30 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { return "", nil } - // Follow Symlinks - // - // Linux can add unresolvable symlinks to the watch list without issue, - // and Windows can't do symlinks period. To maintain consistency, we - // will act like everything is fine if the link can't be resolved. - // There will simply be no file events for broken symlinks. Hence the - // returns of nil on errors. + // Follow Symlinks. if fi.Mode()&os.ModeSymlink == os.ModeSymlink { - name, err = filepath.EvalSymlinks(name) + link, err := os.Readlink(name) if err != nil { + // Return nil because Linux can add unresolvable symlinks to the + // watch list without problems, so maintain consistency with + // that. There will be no file events for broken symlinks. + // TODO: more specific check; returns os.PathError; ENOENT? 
return "", nil } w.mu.Lock() - _, alreadyWatching = w.watches[name] + _, alreadyWatching = w.watches[link] w.mu.Unlock() if alreadyWatching { - return name, nil + // Add to watches so we don't get spurious Create events later + // on when we diff the directories. + w.watches[name] = 0 + w.fileExists[name] = struct{}{} + return link, nil } + name = link fi, err = os.Lstat(name) if err != nil { return "", nil @@ -411,7 +475,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { } // Retry on EINTR; open() can return EINTR in practice on macOS. - // See #354, and go issues 11180 and 39237. + // See #354, and Go issues 11180 and 39237. for { watchfd, err = unix.Open(name, openMode, 0) if err == nil { @@ -444,14 +508,13 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { w.watchesByDir[parentName] = watchesByDir } watchesByDir[watchfd] = struct{}{} - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} w.mu.Unlock() } if isDir { - // Watch the directory if it has not been watched before, - // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + // Watch the directory if it has not been watched before, or if it was + // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) w.mu.Lock() watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && @@ -473,13 +536,10 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { // Event values that it sends down the Events channel. func (w *Watcher) readEvents() { defer func() { - err := unix.Close(w.kq) - if err != nil { - w.Errors <- err - } - unix.Close(w.closepipe[0]) close(w.Events) close(w.Errors) + _ = unix.Close(w.kq) + unix.Close(w.closepipe[0]) }() eventBuffer := make([]unix.Kevent_t, 10) @@ -513,18 +573,8 @@ func (w *Watcher) readEvents() { event := w.newEvent(path.name, mask) - if path.isDir && !event.Has(Remove) { - // Double check to make sure the directory exists. This can - // happen when we do a rm -fr on a recursively watched folders - // and we receive a modification event first but the folder has - // been deleted and later receive the delete event. - if _, err := os.Lstat(event.Name); os.IsNotExist(err) { - event.Op |= Remove - } - } - if event.Has(Rename) || event.Has(Remove) { - w.Remove(event.Name) + w.remove(event.Name, false) w.mu.Lock() delete(w.fileExists, event.Name) w.mu.Unlock() @@ -540,26 +590,30 @@ func (w *Watcher) readEvents() { } if event.Has(Remove) { - // Look for a file that may have overwritten this. - // For example, mv f1 f2 will delete f2, then create f2. + // Look for a file that may have overwritten this; for example, + // mv f1 f2 will delete f2, then create f2. if path.isDir { fileDir := filepath.Clean(event.Name) w.mu.Lock() _, found := w.watches[fileDir] w.mu.Unlock() if found { - // make sure the directory exists before we watch for changes. When we - // do a recursive watch and perform rm -fr, the parent directory might - // have gone missing, ignore the missing directory and let the - // upcoming delete event remove the watch from the parent directory. 
- if _, err := os.Lstat(fileDir); err == nil { - w.sendDirectoryChangeEvents(fileDir) + err := w.sendDirectoryChangeEvents(fileDir) + if err != nil { + if !w.sendError(err) { + closed = true + } } } } else { filePath := filepath.Clean(event.Name) - if fileInfo, err := os.Lstat(filePath); err == nil { - w.sendFileCreatedEventIfNew(filePath, fileInfo) + if fi, err := os.Lstat(filePath); err == nil { + err := w.sendFileCreatedEventIfNew(filePath, fi) + if err != nil { + if !w.sendError(err) { + closed = true + } + } } } } @@ -582,21 +636,31 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { e.Op |= Chmod } + // No point sending a write and delete event at the same time: if it's gone, + // then it's gone. + if e.Op.Has(Write) && e.Op.Has(Remove) { + e.Op &^= Write + } return e } // watchDirectoryFiles to mimic inotify when adding a watch on a directory func (w *Watcher) watchDirectoryFiles(dirPath string) error { // Get all files - files, err := ioutil.ReadDir(dirPath) + files, err := os.ReadDir(dirPath) if err != nil { return err } - for _, fileInfo := range files { - path := filepath.Join(dirPath, fileInfo.Name()) + for _, f := range files { + path := filepath.Join(dirPath, f.Name()) + + fi, err := f.Info() + if err != nil { + return fmt.Errorf("%q: %w", path, err) + } - cleanPath, err := w.internalWatch(path, fileInfo) + cleanPath, err := w.internalWatch(path, fi) if err != nil { // No permission to read the file; that's not a problem: just skip. // But do add it to w.fileExists to prevent it from being picked up @@ -606,7 +670,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM): cleanPath = filepath.Clean(path) default: - return fmt.Errorf("%q: %w", filepath.Join(dirPath, fileInfo.Name()), err) + return fmt.Errorf("%q: %w", path, err) } } @@ -622,26 +686,37 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { // // This functionality is to have the BSD watcher match the inotify, which sends // a create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dir string) { - // Get all files - files, err := ioutil.ReadDir(dir) +func (w *Watcher) sendDirectoryChangeEvents(dir string) error { + files, err := os.ReadDir(dir) if err != nil { - if !w.sendError(fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)) { - return + // Directory no longer exists: we can ignore this safely. kqueue will + // still give us the correct events. + if errors.Is(err, os.ErrNotExist) { + return nil } + return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) } - // Search for new files - for _, fi := range files { - err := w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi) + for _, f := range files { + fi, err := f.Info() if err != nil { - return + return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + } + + err = w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi) + if err != nil { + // Don't need to send an error if this file isn't readable. + if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) { + return nil + } + return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) } } + return nil } // sendFileCreatedEvent sends a create event if the file isn't already being tracked. 
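The hunks above swap the deprecated ioutil.ReadDir for os.ReadDir plus DirEntry.Info(); a standalone illustration of that migration pattern (directory path is an illustrative assumption):

package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
)

func main() {
	// os.ReadDir returns lightweight DirEntry values instead of os.FileInfo.
	entries, err := os.ReadDir("/tmp")
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		fi, err := e.Info() // fetch the FileInfo only when it is actually needed
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(filepath.Join("/tmp", e.Name()), fi.Mode())
	}
}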
-func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { +func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fi os.FileInfo) (err error) { w.mu.Lock() _, doesExist := w.fileExists[filePath] w.mu.Unlock() @@ -652,7 +727,7 @@ func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInf } // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fileInfo) + filePath, err = w.internalWatch(filePath, fi) if err != nil { return err } @@ -664,10 +739,10 @@ func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInf return nil } -func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { - if fileInfo.IsDir() { - // mimic Linux providing delete events for subdirectories - // but preserve the flags used if currently watching subdirectory +func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { + if fi.IsDir() { + // mimic Linux providing delete events for subdirectories, but preserve + // the flags used if currently watching subdirectory w.mu.Lock() flags := w.dirFlags[name] w.mu.Unlock() diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go index a9bb1c3c..d34a23c0 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_other.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_other.go @@ -1,39 +1,169 @@ -//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows -// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows +//go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows) +// +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows + +// Note: the documentation on the Watcher type and methods is generated from +// mkdoc.zsh package fsnotify -import ( - "fmt" - "runtime" -) +import "errors" -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct{} +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". 
+// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # Windows notes +// +// Paths can be added as "C:\path\to\dir", but forward slashes +// ("C:/path/to/dir") will also work. +// +// When a watched directory is removed it will always send an event for the +// directory itself, but may not send events for all files in that directory. +// Sometimes it will send events for all times, sometimes it will send no +// events, and often only for some files. +// +// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest +// value that is guaranteed to work with SMB filesystems. If you have many +// events in quick succession this may not be enough, and you will have to use +// [WithBufferSize] to increase the value. +type Watcher struct { + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, and you may + // want to wait until you've stopped receiving them + // (see the dedup example in cmd/fsnotify). + // + // Some systems may send Write event for directories + // when the directory content changes. + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // when a file is truncated. On Windows it's never + // sent. 
+ Events chan Event + + // Errors sends any errors. + // + // ErrEventOverflow is used to indicate there are too many events: + // + // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) + // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. + // - kqueue, fen: Not used. + Errors chan error +} // NewWatcher creates a new Watcher. func NewWatcher() (*Watcher, error) { - return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS) + return nil, errors.New("fsnotify not supported on the current platform") } -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - return nil -} +// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events +// channel. +// +// The main use case for this is situations with a very large number of events +// where the kernel buffer size can't be increased (e.g. due to lack of +// permissions). An unbuffered Watcher will perform better for almost all use +// cases, and whenever possible you will be better off increasing the kernel +// buffers instead of adding a large userspace buffer. +func NewBufferedWatcher(sz uint) (*Watcher, error) { return NewWatcher() } + +// Close removes all watches and closes the Events channel. +func (w *Watcher) Close() error { return nil } + +// WatchList returns all paths explicitly added with [Watcher.Add] (and are not +// yet removed). +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) WatchList() []string { return nil } // Add starts monitoring the path for changes. // -// A path can only be watched once; attempting to watch it more than once will -// return an error. Paths that do not yet exist on the filesystem cannot be -// added. A watch will be automatically removed if the path is deleted. +// A path can only be watched once; watching it more than once is a no-op and will +// not return an error. Paths that do not yet exist on the filesystem cannot be +// watched. // -// A path will remain watched if it gets renamed to somewhere else on the same -// filesystem, but the monitor will get removed if the path gets deleted and -// re-created, or if it's moved to a different filesystem. +// A watch will be automatically removed if the watched path is deleted or +// renamed. The exception is the Windows backend, which doesn't remove the +// watcher on renames. // // Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special // filesystems (/proc, /sys, etc.) generally don't work. // +// Returns [ErrClosed] if [Watcher.Close] was called. +// +// See [Watcher.AddWith] for a version that allows adding options. +// // # Watching directories // // All files in a directory are monitored, including new files that are created @@ -43,17 +173,26 @@ func (w *Watcher) Close() error { // # Watching files // // Watching individual files (rather than directories) is generally not -// recommended as many tools update files atomically. Instead of "just" writing -// to the file a temporary file will be written to first, and if successful the -// temporary file is moved to to destination removing the original, or some -// variant thereof. The watcher on the original file is now lost, as it no -// longer exists. -// -// Instead, watch the parent directory and use Event.Name to filter out files -// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. 
-func (w *Watcher) Add(name string) error { - return nil -} +// recommended as many programs (especially editors) update files atomically: it +// will write to a temporary file which is then moved to to destination, +// overwriting the original (or some variant thereof). The watcher on the +// original file is now lost, as that no longer exists. +// +// The upshot of this is that a power failure or crash won't leave a +// half-written file. +// +// Watch the parent directory and use Event.Name to filter out files you're not +// interested in. There is an example of this in cmd/fsnotify/file.go. +func (w *Watcher) Add(name string) error { return nil } + +// AddWith is like [Watcher.Add], but allows adding options. When using Add() +// the defaults described below are used. +// +// Possible options are: +// +// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on +// other platforms. The default is 64K (65536 bytes). +func (w *Watcher) AddWith(name string, opts ...addOpt) error { return nil } // Remove stops monitoring the path for changes. // @@ -61,6 +200,6 @@ func (w *Watcher) Add(name string) error { // /tmp/dir and /tmp/dir/subdir then you will need to remove both. // // Removing a path that has not yet been added returns [ErrNonExistentWatch]. -func (w *Watcher) Remove(name string) error { - return nil -} +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) Remove(name string) error { return nil } diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go index ae392867..9bc91e5d 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_windows.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_windows.go @@ -1,6 +1,13 @@ //go:build windows // +build windows +// Windows backend based on ReadDirectoryChangesW() +// +// https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw +// +// Note: the documentation on the Watcher type and methods is generated from +// mkdoc.zsh + package fsnotify import ( @@ -27,9 +34,9 @@ import ( // When a file is removed a Remove event won't be emitted until all file // descriptors are closed, and deletes will always emit a Chmod. For example: // -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove // // This is the event that inotify sends, so not much can be changed about this. // @@ -43,16 +50,16 @@ import ( // // To increase them you can use sysctl or write the value to the /proc file: // -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 // // To make the changes persist on reboot edit /etc/sysctl.conf or // /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check // your distro's documentation): // -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 // // Reaching the limit will result in a "no space left on device" or "too many open // files" error. @@ -68,14 +75,20 @@ import ( // control the maximum number of open files, as well as /etc/login.conf on BSD // systems. 
// -// # macOS notes +// # Windows notes // -// Spotlight indexing on macOS can result in multiple events (see [#15]). A -// temporary workaround is to add your folder(s) to the "Spotlight Privacy -// Settings" until we have a native FSEvents implementation (see [#11]). +// Paths can be added as "C:\path\to\dir", but forward slashes +// ("C:/path/to/dir") will also work. // -// [#11]: https://github.com/fsnotify/fsnotify/issues/11 -// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +// When a watched directory is removed it will always send an event for the +// directory itself, but may not send events for all files in that directory. +// Sometimes it will send events for all times, sometimes it will send no +// events, and often only for some files. +// +// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest +// value that is guaranteed to work with SMB filesystems. If you have many +// events in quick succession this may not be enough, and you will have to use +// [WithBufferSize] to increase the value. type Watcher struct { // Events sends the filesystem change events. // @@ -102,31 +115,52 @@ type Watcher struct { // initiated by the user may show up as one or multiple // writes, depending on when the system syncs things to // disk. For example when compiling a large Go program - // you may get hundreds of Write events, so you - // probably want to wait until you've stopped receiving - // them (see the dedup example in cmd/fsnotify). + // you may get hundreds of Write events, and you may + // want to wait until you've stopped receiving them + // (see the dedup example in cmd/fsnotify). + // + // Some systems may send Write event for directories + // when the directory content changes. // // fsnotify.Chmod Attributes were changed. On Linux this is also sent // when a file is removed (or more accurately, when a // link to an inode is removed). On kqueue it's sent - // and on kqueue when a file is truncated. On Windows - // it's never sent. + // when a file is truncated. On Windows it's never + // sent. Events chan Event // Errors sends any errors. + // + // ErrEventOverflow is used to indicate there are too many events: + // + // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) + // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. + // - kqueue, fen: Not used. Errors chan error port windows.Handle // Handle to completion port input chan *input // Inputs to the reader are sent on this channel quit chan chan<- error - mu sync.Mutex // Protects access to watches, isClosed - watches watchMap // Map of watches (key: i-number) - isClosed bool // Set to true when Close() is first called + mu sync.Mutex // Protects access to watches, closed + watches watchMap // Map of watches (key: i-number) + closed bool // Set to true when Close() is first called } // NewWatcher creates a new Watcher. func NewWatcher() (*Watcher, error) { + return NewBufferedWatcher(50) +} + +// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events +// channel. +// +// The main use case for this is situations with a very large number of events +// where the kernel buffer size can't be increased (e.g. due to lack of +// permissions). An unbuffered Watcher will perform better for almost all use +// cases, and whenever possible you will be better off increasing the kernel +// buffers instead of adding a large userspace buffer. 
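A usage sketch (not part of the patch) of the new AddWith/WithBufferSize option introduced here; the 256 KiB size and the Windows path are illustrative assumptions:

package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// WithBufferSize only affects the Windows ReadDirectoryChangesW buffer
	// and is a no-op on other backends; the default is 64K.
	err = w.AddWith(`C:\path\to\dir`, fsnotify.WithBufferSize(256*1024))
	if err != nil {
		log.Fatal(err)
	}
}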
+func NewBufferedWatcher(sz uint) (*Watcher, error) { port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) if err != nil { return nil, os.NewSyscallError("CreateIoCompletionPort", err) @@ -135,7 +169,7 @@ func NewWatcher() (*Watcher, error) { port: port, watches: make(watchMap), input: make(chan *input, 1), - Events: make(chan Event, 50), + Events: make(chan Event, sz), Errors: make(chan error), quit: make(chan chan<- error, 1), } @@ -143,6 +177,12 @@ func NewWatcher() (*Watcher, error) { return w, nil } +func (w *Watcher) isClosed() bool { + w.mu.Lock() + defer w.mu.Unlock() + return w.closed +} + func (w *Watcher) sendEvent(name string, mask uint64) bool { if mask == 0 { return false @@ -167,14 +207,14 @@ func (w *Watcher) sendError(err error) bool { return false } -// Close removes all watches and closes the events channel. +// Close removes all watches and closes the Events channel. func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() + if w.isClosed() { return nil } - w.isClosed = true + + w.mu.Lock() + w.closed = true w.mu.Unlock() // Send "quit" message to the reader goroutine @@ -188,17 +228,21 @@ func (w *Watcher) Close() error { // Add starts monitoring the path for changes. // -// A path can only be watched once; attempting to watch it more than once will -// return an error. Paths that do not yet exist on the filesystem cannot be -// added. A watch will be automatically removed if the path is deleted. +// A path can only be watched once; watching it more than once is a no-op and will +// not return an error. Paths that do not yet exist on the filesystem cannot be +// watched. // -// A path will remain watched if it gets renamed to somewhere else on the same -// filesystem, but the monitor will get removed if the path gets deleted and -// re-created, or if it's moved to a different filesystem. +// A watch will be automatically removed if the watched path is deleted or +// renamed. The exception is the Windows backend, which doesn't remove the +// watcher on renames. // // Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special // filesystems (/proc, /sys, etc.) generally don't work. // +// Returns [ErrClosed] if [Watcher.Close] was called. +// +// See [Watcher.AddWith] for a version that allows adding options. +// // # Watching directories // // All files in a directory are monitored, including new files that are created @@ -208,27 +252,41 @@ func (w *Watcher) Close() error { // # Watching files // // Watching individual files (rather than directories) is generally not -// recommended as many tools update files atomically. Instead of "just" writing -// to the file a temporary file will be written to first, and if successful the -// temporary file is moved to to destination removing the original, or some -// variant thereof. The watcher on the original file is now lost, as it no -// longer exists. -// -// Instead, watch the parent directory and use Event.Name to filter out files -// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. -func (w *Watcher) Add(name string) error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return errors.New("watcher already closed") +// recommended as many programs (especially editors) update files atomically: it +// will write to a temporary file which is then moved to to destination, +// overwriting the original (or some variant thereof). The watcher on the +// original file is now lost, as that no longer exists. 
+// +// The upshot of this is that a power failure or crash won't leave a +// half-written file. +// +// Watch the parent directory and use Event.Name to filter out files you're not +// interested in. There is an example of this in cmd/fsnotify/file.go. +func (w *Watcher) Add(name string) error { return w.AddWith(name) } + +// AddWith is like [Watcher.Add], but allows adding options. When using Add() +// the defaults described below are used. +// +// Possible options are: +// +// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on +// other platforms. The default is 64K (65536 bytes). +func (w *Watcher) AddWith(name string, opts ...addOpt) error { + if w.isClosed() { + return ErrClosed + } + + with := getOptions(opts...) + if with.bufsize < 4096 { + return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes") } - w.mu.Unlock() in := &input{ - op: opAddWatch, - path: filepath.Clean(name), - flags: sysFSALLEVENTS, - reply: make(chan error), + op: opAddWatch, + path: filepath.Clean(name), + flags: sysFSALLEVENTS, + reply: make(chan error), + bufsize: with.bufsize, } w.input <- in if err := w.wakeupReader(); err != nil { @@ -243,7 +301,13 @@ func (w *Watcher) Add(name string) error { // /tmp/dir and /tmp/dir/subdir then you will need to remove both. // // Removing a path that has not yet been added returns [ErrNonExistentWatch]. +// +// Returns nil if [Watcher.Close] was called. func (w *Watcher) Remove(name string) error { + if w.isClosed() { + return nil + } + in := &input{ op: opRemoveWatch, path: filepath.Clean(name), @@ -256,8 +320,15 @@ func (w *Watcher) Remove(name string) error { return <-in.reply } -// WatchList returns all paths added with [Add] (and are not yet removed). +// WatchList returns all paths explicitly added with [Watcher.Add] (and are not +// yet removed). +// +// Returns nil if [Watcher.Close] was called. func (w *Watcher) WatchList() []string { + if w.isClosed() { + return nil + } + w.mu.Lock() defer w.mu.Unlock() @@ -279,7 +350,6 @@ func (w *Watcher) WatchList() []string { // This should all be removed at some point, and just use windows.FILE_NOTIFY_* const ( sysFSALLEVENTS = 0xfff - sysFSATTRIB = 0x4 sysFSCREATE = 0x100 sysFSDELETE = 0x200 sysFSDELETESELF = 0x400 @@ -305,9 +375,6 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { e.Op |= Rename } - if mask&sysFSATTRIB == sysFSATTRIB { - e.Op |= Chmod - } return e } @@ -321,10 +388,11 @@ const ( ) type input struct { - op int - path string - flags uint32 - reply chan error + op int + path string + flags uint32 + bufsize int + reply chan error } type inode struct { @@ -334,13 +402,14 @@ type inode struct { } type watch struct { - ov windows.Overlapped - ino *inode // i-number - path string // Directory path - mask uint64 // Directory itself is being watched with these notify flags - names map[string]uint64 // Map of names being watched and their notify flags - rename string // Remembers the old name while renaming a file - buf [65536]byte // 64K buffer + ov windows.Overlapped + ino *inode // i-number + recurse bool // Recursive watch? 
+ path string // Directory path + mask uint64 // Directory itself is being watched with these notify flags + names map[string]uint64 // Map of names being watched and their notify flags + rename string // Remembers the old name while renaming a file + buf []byte // buffer, allocated later } type ( @@ -413,7 +482,10 @@ func (m watchMap) set(ino *inode, watch *watch) { } // Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64) error { +func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { + //pathname, recurse := recursivePath(pathname) + recurse := false + dir, err := w.getDir(pathname) if err != nil { return err @@ -433,9 +505,11 @@ func (w *Watcher) addWatch(pathname string, flags uint64) error { return os.NewSyscallError("CreateIoCompletionPort", err) } watchEntry = &watch{ - ino: ino, - path: dir, - names: make(map[string]uint64), + ino: ino, + path: dir, + names: make(map[string]uint64), + recurse: recurse, + buf: make([]byte, bufsize), } w.mu.Lock() w.watches.set(ino, watchEntry) @@ -465,6 +539,8 @@ func (w *Watcher) addWatch(pathname string, flags uint64) error { // Must run within the I/O thread. func (w *Watcher) remWatch(pathname string) error { + pathname, recurse := recursivePath(pathname) + dir, err := w.getDir(pathname) if err != nil { return err @@ -478,6 +554,10 @@ func (w *Watcher) remWatch(pathname string) error { watch := w.watches.get(ino) w.mu.Unlock() + if recurse && !watch.recurse { + return fmt.Errorf("can't use \\... with non-recursive watch %q", pathname) + } + err = windows.CloseHandle(ino.handle) if err != nil { w.sendError(os.NewSyscallError("CloseHandle", err)) @@ -535,8 +615,11 @@ func (w *Watcher) startRead(watch *watch) error { return nil } - rdErr := windows.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], - uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) + // We need to pass the array, rather than the slice. + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&watch.buf)) + rdErr := windows.ReadDirectoryChanges(watch.ino.handle, + (*byte)(unsafe.Pointer(hdr.Data)), uint32(hdr.Len), + watch.recurse, mask, nil, &watch.ov, 0) if rdErr != nil { err := os.NewSyscallError("ReadDirectoryChanges", rdErr) if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { @@ -563,9 +646,8 @@ func (w *Watcher) readEvents() { runtime.LockOSThread() for { + // This error is handled after the watch == nil check below. qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE) - // This error is handled after the watch == nil check below. NOTE: this - // seems odd, note sure if it's correct. 
watch := (*watch)(unsafe.Pointer(ov)) if watch == nil { @@ -595,7 +677,7 @@ func (w *Watcher) readEvents() { case in := <-w.input: switch in.op { case opAddWatch: - in.reply <- w.addWatch(in.path, uint64(in.flags)) + in.reply <- w.addWatch(in.path, uint64(in.flags), in.bufsize) case opRemoveWatch: in.reply <- w.remWatch(in.path) } @@ -605,6 +687,8 @@ func (w *Watcher) readEvents() { } switch qErr { + case nil: + // No error case windows.ERROR_MORE_DATA: if watch == nil { w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")) @@ -626,13 +710,12 @@ func (w *Watcher) readEvents() { default: w.sendError(os.NewSyscallError("GetQueuedCompletionPort", qErr)) continue - case nil: } var offset uint32 for { if n == 0 { - w.sendError(errors.New("short read in readEvents()")) + w.sendError(ErrEventOverflow) break } @@ -703,8 +786,9 @@ func (w *Watcher) readEvents() { // Error! if offset >= n { + //lint:ignore ST1005 Windows should be capitalized w.sendError(errors.New( - "Windows system assumed buffer larger than it is, events have likely been missed.")) + "Windows system assumed buffer larger than it is, events have likely been missed")) break } } @@ -720,9 +804,6 @@ func (w *Watcher) toWindowsFlags(mask uint64) uint32 { if mask&sysFSMODIFY != 0 { m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE } - if mask&sysFSATTRIB != 0 { - m |= windows.FILE_NOTIFY_CHANGE_ATTRIBUTES - } if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME } diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 30a5bf0f..24c99cc4 100644 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -1,13 +1,18 @@ -//go:build !plan9 -// +build !plan9 - // Package fsnotify provides a cross-platform interface for file system // notifications. +// +// Currently supported systems: +// +// Linux 2.6.32+ via inotify +// BSD, macOS via kqueue +// Windows via ReadDirectoryChangesW +// illumos via FEN package fsnotify import ( "errors" "fmt" + "path/filepath" "strings" ) @@ -33,34 +38,52 @@ type Op uint32 // The operations fsnotify can trigger; see the documentation on [Watcher] for a // full description, and check them with [Event.Has]. const ( + // A new pathname was created. Create Op = 1 << iota + + // The pathname was written to; this does *not* mean the write has finished, + // and a write can be followed by more writes. Write + + // The path was removed; any watches on it will be removed. Some "remove" + // operations may trigger a Rename if the file is actually moved (for + // example "remove to trash" is often a rename). Remove + + // The path was renamed to something else; any watched on it will be + // removed. Rename + + // File attributes were changed. + // + // It's generally not recommended to take action on this event, as it may + // get triggered very frequently by some software. For example, Spotlight + // indexing on macOS, anti-virus software, backup software, etc. Chmod ) -// Common errors that can be reported by a watcher +// Common errors that can be reported. 
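To round out the Op documentation in this hunk, a small sketch (not part of the patch) of checking events with Event.Has rather than comparing Op values directly, and of leaving noisy Chmod events alone as the comments above suggest:

package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func handle(ev fsnotify.Event) {
	switch {
	case ev.Has(fsnotify.Remove) || ev.Has(fsnotify.Rename):
		log.Println("gone:", ev.Name) // the watch on ev.Name has been removed
	case ev.Has(fsnotify.Write):
		log.Println("written:", ev.Name) // may be one of several writes
	case ev.Has(fsnotify.Create):
		log.Println("created:", ev.Name)
	}
	// Chmod is deliberately ignored: it can fire very frequently.
}

func main() {
	handle(fsnotify.Event{Name: "/tmp/x", Op: fsnotify.Write}) // illustrative event
}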
var ( - ErrNonExistentWatch = errors.New("can't remove non-existent watcher") - ErrEventOverflow = errors.New("fsnotify queue overflow") + ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch") + ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") + ErrClosed = errors.New("fsnotify: watcher already closed") ) -func (op Op) String() string { +func (o Op) String() string { var b strings.Builder - if op.Has(Create) { + if o.Has(Create) { b.WriteString("|CREATE") } - if op.Has(Remove) { + if o.Has(Remove) { b.WriteString("|REMOVE") } - if op.Has(Write) { + if o.Has(Write) { b.WriteString("|WRITE") } - if op.Has(Rename) { + if o.Has(Rename) { b.WriteString("|RENAME") } - if op.Has(Chmod) { + if o.Has(Chmod) { b.WriteString("|CHMOD") } if b.Len() == 0 { @@ -70,7 +93,7 @@ func (op Op) String() string { } // Has reports if this operation has the given operation. -func (o Op) Has(h Op) bool { return o&h == h } +func (o Op) Has(h Op) bool { return o&h != 0 } // Has reports if this event has the given operation. func (e Event) Has(op Op) bool { return e.Op.Has(op) } @@ -79,3 +102,45 @@ func (e Event) Has(op Op) bool { return e.Op.Has(op) } func (e Event) String() string { return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name) } + +type ( + addOpt func(opt *withOpts) + withOpts struct { + bufsize int + } +) + +var defaultOpts = withOpts{ + bufsize: 65536, // 64K +} + +func getOptions(opts ...addOpt) withOpts { + with := defaultOpts + for _, o := range opts { + o(&with) + } + return with +} + +// WithBufferSize sets the [ReadDirectoryChangesW] buffer size. +// +// This only has effect on Windows systems, and is a no-op for other backends. +// +// The default value is 64K (65536 bytes) which is the highest value that works +// on all filesystems and should be enough for most applications, but if you +// have a large burst of events it may not be enough. You can increase it if +// you're hitting "queue or buffer overflow" errors ([ErrEventOverflow]). +// +// [ReadDirectoryChangesW]: https://learn.microsoft.com/en-gb/windows/win32/api/winbase/nf-winbase-readdirectorychangesw +func WithBufferSize(bytes int) addOpt { + return func(opt *withOpts) { opt.bufsize = bytes } +} + +// Check if this path is recursive (ends with "/..." or "\..."), and return the +// path with the /... stripped. +func recursivePath(path string) (string, bool) { + if filepath.Base(path) == "..." { + return filepath.Dir(path), true + } + return path, false +} diff --git a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh index b09ef768..99012ae6 100644 --- a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh +++ b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh @@ -2,8 +2,8 @@ [ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1 setopt err_exit no_unset pipefail extended_glob -# Simple script to update the godoc comments on all watchers. Probably took me -# more time to write this than doing it manually, but ah well 🙃 +# Simple script to update the godoc comments on all watchers so you don't need +# to update the same comment 5 times. 
watcher=$(< github.com/opencontainers/runc v1.1.1-0.20230904132852-a0466dd76f23 diff --git a/vendor/sigs.k8s.io/yaml/LICENSE b/vendor/sigs.k8s.io/yaml/LICENSE index 7805d36d..093d6d3e 100644 --- a/vendor/sigs.k8s.io/yaml/LICENSE +++ b/vendor/sigs.k8s.io/yaml/LICENSE @@ -48,3 +48,259 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# The forked go-yaml.v3 library under this project is covered by two +different licenses (MIT and Apache): + +#### MIT License #### + +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original MIT license, with the additional +copyright staring in 2011 when the project was ported over: + + apic.go emitterc.go parserc.go readerc.go scannerc.go + writerc.go yamlh.go yamlprivateh.go + +Copyright (c) 2006-2010 Kirill Simonov +Copyright (c) 2006-2011 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +### Apache License ### + +All the remaining project files are covered by the Apache license: + +Copyright (c) 2011-2019 Canonical Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +# The forked go-yaml.v2 library under the project is covered by an +Apache license: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/sigs.k8s.io/yaml/OWNERS b/vendor/sigs.k8s.io/yaml/OWNERS index 325b40b0..003a149e 100644 --- a/vendor/sigs.k8s.io/yaml/OWNERS +++ b/vendor/sigs.k8s.io/yaml/OWNERS @@ -2,26 +2,22 @@ approvers: - dims -- lavalamp +- jpbetz - smarterclayton - deads2k - sttts - liggitt -- caesarxuchao reviewers: - dims - thockin -- lavalamp +- jpbetz - smarterclayton - wojtek-t - deads2k - derekwaynecarr -- caesarxuchao - mikedanese - liggitt -- gmarek - sttts -- ncdc - tallclair labels: - sig/api-machinery diff --git a/vendor/sigs.k8s.io/yaml/fields.go b/vendor/sigs.k8s.io/yaml/fields.go index 235b7f2c..0ea28bd0 100644 --- a/vendor/sigs.k8s.io/yaml/fields.go +++ b/vendor/sigs.k8s.io/yaml/fields.go @@ -16,53 +16,53 @@ import ( "unicode/utf8" ) -// indirect walks down v allocating pointers as needed, +// indirect walks down 'value' allocating pointers as needed, // until it gets to a non-pointer. // if it encounters an Unmarshaler, indirect stops and returns that. // if decodingNull is true, indirect stops at the last pointer so it can be set to nil. -func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { - // If v is a named type and is addressable, +func indirect(value reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If 'value' is a named type and is addressable, // start with its address, so that if the type has pointer methods, // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { - v = v.Addr() + if value.Kind() != reflect.Ptr && value.Type().Name() != "" && value.CanAddr() { + value = value.Addr() } for { // Load value from interface, but only if the result will be // usefully addressable. - if v.Kind() == reflect.Interface && !v.IsNil() { - e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { - v = e + if value.Kind() == reflect.Interface && !value.IsNil() { + element := value.Elem() + if element.Kind() == reflect.Ptr && !element.IsNil() && (!decodingNull || element.Elem().Kind() == reflect.Ptr) { + value = element continue } } - if v.Kind() != reflect.Ptr { + if value.Kind() != reflect.Ptr { break } - if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + if value.Elem().Kind() != reflect.Ptr && decodingNull && value.CanSet() { break } - if v.IsNil() { - if v.CanSet() { - v.Set(reflect.New(v.Type().Elem())) + if value.IsNil() { + if value.CanSet() { + value.Set(reflect.New(value.Type().Elem())) } else { - v = reflect.New(v.Type().Elem()) + value = reflect.New(value.Type().Elem()) } } - if v.Type().NumMethod() > 0 { - if u, ok := v.Interface().(json.Unmarshaler); ok { + if value.Type().NumMethod() > 0 { + if u, ok := value.Interface().(json.Unmarshaler); ok { return u, nil, reflect.Value{} } - if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + if u, ok := value.Interface().(encoding.TextUnmarshaler); ok { return nil, u, reflect.Value{} } } - v = v.Elem() + value = value.Elem() } - return nil, nil, v + return nil, nil, value } // A field represents a single field found in a struct. @@ -134,8 +134,8 @@ func typeFields(t reflect.Type) []field { next := []field{{typ: t}} // Count of queued names for current level and the next. - count := map[reflect.Type]int{} - nextCount := map[reflect.Type]int{} + var count map[reflect.Type]int + var nextCount map[reflect.Type]int // Types already visited at an earlier level. 
visited := map[reflect.Type]bool{} @@ -348,8 +348,9 @@ const ( // 4) simpleLetterEqualFold, no specials, no non-letters. // // The letters S and K are special because they map to 3 runes, not just 2: -// * S maps to s and to U+017F 'ſ' Latin small letter long s -// * k maps to K and to U+212A 'K' Kelvin sign +// - S maps to s and to U+017F 'ſ' Latin small letter long s +// - k maps to K and to U+212A 'K' Kelvin sign +// // See http://play.golang.org/p/tTxjOc0OGo // // The returned function is specialized for matching against s and @@ -420,10 +421,8 @@ func equalFoldRight(s, t []byte) bool { t = t[size:] } - if len(t) > 0 { - return false - } - return true + + return len(t) <= 0 } // asciiEqualFold is a specialization of bytes.EqualFold for use when diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE b/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE new file mode 100644 index 00000000..8dada3ed --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml b/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml new file mode 100644 index 00000000..8da58fbf --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE b/vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE new file mode 100644 index 00000000..866d74a7 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS b/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS new file mode 100644 index 00000000..73be0a3a --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS @@ -0,0 +1,24 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- dims +- jpbetz +- smarterclayton +- deads2k +- sttts +- liggitt +- natasha41575 +- knverey +reviewers: +- dims +- thockin +- jpbetz +- smarterclayton +- deads2k +- derekwaynecarr +- mikedanese +- liggitt +- sttts +- tallclair +labels: +- sig/api-machinery diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md b/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md new file mode 100644 index 00000000..53f4139d --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md @@ -0,0 +1,143 @@ +# go-yaml fork + +This package is a fork of the go-yaml library and is intended solely for consumption +by kubernetes projects. In this fork, we plan to support only critical changes required for +kubernetes, such as small bug fixes and regressions. Larger, general-purpose feature requests +should be made in the upstream go-yaml library, and we will reject such changes in this fork +unless we are pulling them from upstream. + +This fork is based on v2.4.0: https://github.com/go-yaml/yaml/releases/tag/v2.4.0 + +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v2*. + +To install it, run: + + go get gopkg.in/yaml.v2 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v2" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. 
+type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go new file mode 100644 index 00000000..acf71402 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go @@ -0,0 +1,744 @@ +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. 
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +var disableLineWrapping = false + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } + if disableLineWrapping { + emitter.best_width = -1 + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. 
+// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. 
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. 
+// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go new file mode 100644 index 00000000..129bc2a9 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go @@ -0,0 +1,815 @@ +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + line, column int + tag string + // For an alias node, alias holds the resolved alias. + alias *node + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
+ +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *node + doneInit bool +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + if len(b) == 0 { + b = []byte{'\n'} + } + yaml_parser_set_input_string(&p.parser, b) + return &p +} + +func newParserFromReader(r io.Reader) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + yaml_parser_set_input_reader(&p.parser, r) + return &p +} + +func (p *parser) init() { + if p.doneInit { + return + } + p.expect(yaml_STREAM_START_EVENT) + p.doneInit = true +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +// expect consumes an event from the event stream and +// checks that it's of the expected type. +func (p *parser) expect(e yaml_event_type_t) { + if p.event.typ == yaml_NO_EVENT { + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + } + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + if p.event.typ != e { + p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) + p.fail() + } + yaml_event_delete(&p.event) + p.event.typ = yaml_NO_EVENT +} + +// peek peeks at the next event in the event stream, +// puts the results into p.event and returns the event type. +func (p *parser) peek() yaml_event_type_t { + if p.event.typ != yaml_NO_EVENT { + return p.event.typ + } + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + return p.event.typ +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } else if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *node, anchor []byte) { + if anchor != nil { + p.doc.anchors[string(anchor)] = n + } +} + +func (p *parser) parse() *node { + p.init() + switch p.peek() { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. 
+ return nil + default: + panic("attempted to parse unknown event: " + p.event.typ.String()) + } +} + +func (p *parser) node(kind int) *node { + return &node{ + kind: kind, + line: p.event.start_mark.line, + column: p.event.start_mark.column, + } +} + +func (p *parser) document() *node { + n := p.node(documentNode) + n.anchors = make(map[string]*node) + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + n.children = append(n.children, p.parse()) + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *node { + n := p.node(aliasNode) + n.value = string(p.event.anchor) + n.alias = p.doc.anchors[n.value] + if n.alias == nil { + failf("unknown anchor '%s' referenced", n.value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *node { + n := p.node(scalarNode) + n.value = string(p.event.value) + n.tag = string(p.event.tag) + n.implicit = p.event.implicit + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *node { + n := p.node(sequenceNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + n.children = append(n.children, p.parse()) + } + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *node { + n := p.node(mappingNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + n.children = append(n.children, p.parse(), p.parse()) + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *node + aliases map[*node]bool + mapType reflect.Type + terrors []string + strict bool + + decodeCount int + aliasCount int + aliasDepth int +} + +var ( + mapItemType = reflect.TypeOf(MapItem{}) + durationType = reflect.TypeOf(time.Duration(0)) + defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = defaultMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder(strict bool) *decoder { + d := &decoder{mapType: defaultMapType, strict: strict} + d.aliases = make(map[*node]bool) + return d +} + +func (d *decoder) terror(n *node, tag string, out reflect.Value) { + if n.tag != "" { + tag = n.tag + } + value := n.value + if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. 
+// +// If n holds a null value, prepare returns before doing anything. +func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + if u, ok := out.Addr().Interface().(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). + return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + +func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } + switch n.kind { + case documentNode: + return d.document(n, out) + case aliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.kind { + case scalarNode: + good = d.scalar(n, out) + case mappingNode: + good = d.mapping(n, out) + case sequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + return good +} + +func (d *decoder) document(n *node, out reflect.Value) (good bool) { + if len(n.children) == 1 { + d.doc = n + d.unmarshal(n.children[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. 
+ failf("anchor '%s' value contains itself", n.value) + } + d.aliases[n] = true + d.aliasDepth++ + good = d.unmarshal(n.alias, out) + d.aliasDepth-- + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) scalar(n *node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.tag == "" && !n.implicit { + tag = yaml_STR_TAG + resolved = n.value + } else { + tag, resolved = resolve(n.tag, n.value) + if tag == yaml_BINARY_TAG { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + if out.Kind() == reflect.Map && !out.CanAddr() { + resetMap(out) + } else { + out.Set(reflect.Zero(out.Type())) + } + return true + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == yaml_BINARY_TAG { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. + text = []byte(n.value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == yaml_BINARY_TAG { + out.SetString(resolved.(string)) + return true + } + if resolved != nil { + out.SetString(n.value) + return true + } + case reflect.Interface: + if resolved == nil { + out.Set(reflect.Zero(out.Type())) + } else if tag == yaml_TIMESTAMP_TAG { + // It looks like a timestamp but for backward compatibility + // reasons we set it as a string, so that code that unmarshals + // timestamp-like values into interface{} will continue to + // see a string and not a time.Time. + // TODO(v3) Drop this. 
+ out.Set(reflect.ValueOf(n.value)) + } else { + out.Set(reflect.ValueOf(resolved)) + } + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch resolved := resolved.(type) { + case int: + if !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case int64: + if !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case float64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Ptr: + if out.Type().Elem() == reflect.TypeOf(resolved) { + // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? + elem := reflect.New(out.Type().Elem()) + elem.Elem().Set(reflect.ValueOf(resolved)) + out.Set(elem) + return true + } + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { + l := len(n.children) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, yaml_SEQ_TAG, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.children[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Slice: + return d.mappingSlice(n, out) + case reflect.Map: + // okay + case reflect.Interface: + if d.mapType.Kind() == reflect.Map { + iface := out + out = reflect.MakeMap(d.mapType) + iface.Set(out) + } else { + slicev := reflect.New(d.mapType).Elem() + if !d.mappingSlice(n, slicev) { + return false + } + out.Set(slicev) + return true + } + default: + d.terror(n, yaml_MAP_TAG, out) + return false + } + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + mapType := d.mapType + if outt.Key() == ifaceType && outt.Elem() == ifaceType { + d.mapType = outt + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + l := len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.children[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.children[i+1], e) { + d.setMapIndex(n.children[i+1], out, k, e) + } + } + } + d.mapType = mapType + return true +} + +func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) { + if d.strict && out.MapIndex(k) != zeroValue { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface())) + return + } + out.SetMapIndex(k, v) +} + +func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { + outt := out.Type() + if outt.Elem() != mapItemType { + d.terror(n, yaml_MAP_TAG, out) + return false + } + + mapType := d.mapType + d.mapType = outt + + var slice []MapItem + var l = len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + item := MapItem{} + k := reflect.ValueOf(&item.Key).Elem() + if d.unmarshal(n.children[i], k) { + v := reflect.ValueOf(&item.Value).Elem() + if d.unmarshal(n.children[i+1], v) { + slice = append(slice, item) + } + } + } + out.Set(reflect.ValueOf(slice)) + d.mapType = mapType + return true +} + +func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + name := settableValueOf("") + l := len(n.children) + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + var doneFields []bool + if d.strict { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + for i := 0; i < l; i += 2 { + ni := n.children[i] + if isMerge(ni) { + d.merge(n.children[i+1], out) + continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + if d.strict { + if doneFields[info.Id] { + d.terrors = append(d.terrors, 
fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = out.FieldByIndex(info.Inline) + } + d.unmarshal(n.children[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.children[i+1], value) + d.setMapIndex(n.children[i+1], inlineMap, name, value) + } else if d.strict { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type())) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *node, out reflect.Value) { + switch n.kind { + case mappingNode: + d.unmarshal(n, out) + case aliasNode: + if n.alias != nil && n.alias.kind != mappingNode { + failWantMap() + } + d.unmarshal(n, out) + case sequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.children) - 1; i >= 0; i-- { + ni := n.children[i] + if ni.kind == aliasNode { + if ni.alias != nil && ni.alias.kind != mappingNode { + failWantMap() + } + } else if ni.kind != mappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *node) bool { + return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go new file mode 100644 index 00000000..a1c2cc52 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go @@ -0,0 +1,1685 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. 
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? 
+ tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. +func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. 
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. +func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if 
!yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. 
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. +func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. 
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. +func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. 
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. +func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. 
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. +func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. 
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. +func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. 
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. 
+ preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. +func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. 
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = 
false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if 
!put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go new file mode 100644 index 00000000..0ee738e1 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go @@ -0,0 +1,390 @@ +package yaml + +import ( + "encoding" + 
"fmt" + "io" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// jsonNumber is the interface of the encoding/json.Number datatype. +// Repeating the interface here avoids a dependency on encoding/json, and also +// supports other libraries like jsoniter, which use a similar datatype with +// the same interface. Detecting this interface is useful when dealing with +// structures containing json.Number, which is a string under the hood. The +// encoder should prefer the use of Int64(), Float64() and string(), in that +// order, when encoding this type. +type jsonNumber interface { + Float64() (float64, error) + Int64() (int64, error) + String() string +} + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + // doneInit holds whether the initial stream_start_event has been + // emitted. + doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.open_ended = false + yaml_stream_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. + e.must(yaml_emitter_emit(&e.emitter, &e.event)) +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch m := iface.(type) { + case jsonNumber: + integer, err := m.Int64() + if err == nil { + // In this case the json.Number is a valid int64 + in = reflect.ValueOf(integer) + break + } + float, err := m.Float64() + if err == nil { + // In this case the json.Number is a valid float64 + in = reflect.ValueOf(float) + break + } + // fallback case - no number could be obtained + in = reflect.ValueOf(m.String()) + case time.Time, *time.Time: + // Although time.Time implements TextMarshaler, + // we don't want to treat it as a string for YAML + // purposes because YAML has special support for + // timestamps. 
+ case Marshaler: + v, err := m.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + in = reflect.ValueOf(v) + case encoding.TextMarshaler: + text, err := m.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + if in.Type() == ptrTimeType { + e.timev(tag, in.Elem()) + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Struct: + if in.Type() == timeType { + e.timev(tag, in) + } else { + e.structv(tag, in) + } + case reflect.Slice, reflect.Array: + if in.Type().Elem() == mapItemType { + e.itemsv(tag, in) + } else { + e.slicev(tag, in) + } + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if in.Type() == durationType { + e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) + } else { + e.intv(tag, in) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) itemsv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) + for _, item := range slice { + e.marshal("", reflect.ValueOf(item.Key)) + e.marshal("", reflect.ValueOf(item.Value)) + } + }) +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = in.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) + e.emit() + f() + yaml_mapping_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) slicev(tag string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 
returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + canUsePlain := true + switch { + case !utf8.ValidString(s): + if tag == yaml_BINARY_TAG { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if tag != "" { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. + tag = yaml_BINARY_TAG + s = encodeBase64(s) + case tag == "": + // Check to see if it would resolve to a specific + // tag when encoded unquoted. If it doesn't, + // there's no need to quote it. + rtag, _ := resolve("", s) + canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s) + } + // Note: it's possible for user code to emit invalid YAML + // if they explicitly specify a tag and a string containing + // text that's incompatible with that tag. + switch { + case strings.Contains(s, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case canUsePlain: + style = yaml_PLAIN_SCALAR_STYLE + default: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) timev(tag string, in reflect.Value) { + t := in.Interface().(time.Time) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { + implicit := tag == "" + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.emit() +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go new file mode 100644 index 00000000..81d05dfe --- /dev/null +++ 
b/vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go @@ -0,0 +1,1095 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
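For orientation before the state machine below: peek_token and skip_token drain the scanner's token queue, and each yaml_parser_parse call yields one event. At the public-API level this pull model shows up as the streaming Decoder; a minimal sketch, assuming the gopkg.in/yaml.v2-compatible surface of this fork (import path illustrative), where each Decode consumes one document and io.EOF signals the end of the stream.

package main

import (
	"fmt"
	"io"
	"strings"

	yaml "gopkg.in/yaml.v2" // assumed equivalent of the vendored goyaml.v2 fork
)

func main() {
	// Three documents in one stream; the document start/end states below
	// delimit what each Decode call returns.
	stream := "a: 1\n---\nb: 2\n---\nc: 3\n"
	dec := yaml.NewDecoder(strings.NewReader(stream))
	for {
		var doc map[string]int
		if err := dec.Decode(&doc); err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		fmt.Println(doc) // map[a:1], then map[b:2], then map[c:3]
	}
}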
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END +// ************ +func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_STREAM_START_TOKEN { + return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark) + } + parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + encoding: token.encoding, + } + skip_token(parser) + return true +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// * +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// ************************* +func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { + + token := peek_token(parser) + if token == nil { + return false + } + + // Parse extra document end indicators. + if !implicit { + for token.typ == yaml_DOCUMENT_END_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && + token.typ != yaml_TAG_DIRECTIVE_TOKEN && + token.typ != yaml_DOCUMENT_START_TOKEN && + token.typ != yaml_STREAM_END_TOKEN { + // Parse an implicit document. + if !yaml_parser_process_directives(parser, nil, nil) { + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_BLOCK_NODE_STATE + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + } else if token.typ != yaml_STREAM_END_TOKEN { + // Parse an explicit document. + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + start_mark := token.start_mark + if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { + return false + } + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_DOCUMENT_START_TOKEN { + yaml_parser_set_parser_error(parser, + "did not find expected ", token.start_mark) + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE + end_mark := token.end_mark + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: false, + } + skip_token(parser) + + } else { + // Parse the stream end. + parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? 
+// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. 
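The node and collection states above turn &anchor, *alias and ! tags into decorated events; aliases are expanded and explicit tags applied later, on the decoding side. A minimal sketch of the caller-visible effect, assuming the gopkg.in/yaml.v2-compatible surface of this fork (import path illustrative); the << merge key used here is the most common reason to define anchors and is resolved further down the pipeline.

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2" // assumed equivalent of the vendored goyaml.v2 fork
)

func main() {
	doc := `
defaults: &base
  retries: 3
  timeout: 30
job:
  <<: *base   # alias expanded, then merged into the mapping
  timeout: 60 # an explicit key wins over a merged one
version: !!str 1.2 # explicit tag forces a string
`
	var out map[string]interface{}
	if err := yaml.Unmarshal([]byte(doc), &out); err != nil {
		panic(err)
	}
	fmt.Println(out["job"])            // map[retries:3 timeout:60]
	fmt.Printf("%T\n", out["version"]) // string
}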
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. +func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go new file mode 100644 index 00000000..7c1f5fac --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go @@ -0,0 +1,412 @@ +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. 
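Directive processing, as just defined, accepts only %YAML 1.1 and layers %TAG handles on top of the two defaults (! and !!). A small sketch of the observable behaviour, assuming the gopkg.in/yaml.v2-compatible surface of this fork (import path illustrative):

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2" // assumed equivalent of the vendored goyaml.v2 fork
)

func main() {
	var v map[string]string

	// %YAML 1.1 passes the version check in yaml_parser_process_directives.
	err := yaml.Unmarshal([]byte("%YAML 1.1\n---\nkey: value\n"), &v)
	fmt.Println(err, v) // <nil> map[key:value]

	// Any other version trips the "found incompatible YAML document" error above.
	err = yaml.Unmarshal([]byte("%YAML 1.2\n---\nkey: value\n"), &v)
	fmt.Println(err) // an error like: yaml: found incompatible YAML document
}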
+const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. +func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. 
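Because of the BOM sniffing just above, callers can hand the parser UTF-16 input directly; the reader transcodes to UTF-8 internally. A hedged sketch (utf16le is a throwaway helper written for this example; the import path stands in for the vendored fork):

package main

import (
	"encoding/binary"
	"fmt"
	"unicode/utf16"

	yaml "gopkg.in/yaml.v2" // assumed equivalent of the vendored goyaml.v2 fork
)

// utf16le encodes s as UTF-16LE and prepends the FF FE byte order mark,
// matching bom_UTF16LE above.
func utf16le(s string) []byte {
	units := utf16.Encode([]rune("\uFEFF" + s))
	buf := make([]byte, 2*len(units))
	for i, u := range units {
		binary.LittleEndian.PutUint16(buf[2*i:], u)
	}
	return buf
}

func main() {
	var out map[string]string
	if err := yaml.Unmarshal(utf16le("greeting: hello\n"), &out); err != nil {
		panic(err)
	}
	fmt.Println(out["greeting"]) // hello
}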
+ if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. 
+ switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. 
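The decode loop above rejects streams that are not well-formed UTF-8 or UTF-16: bad leading or trailing octets, over-long sequences, surrogate code points, and stray control characters. The mirror image on the encoding side is stringv earlier in this patch, which falls back to !!binary plus base64 for strings that are not valid UTF-8. A rough sketch of both, under the same gopkg.in/yaml.v2-equivalence assumption as the earlier examples:

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2" // assumed equivalent of the vendored goyaml.v2 fork
)

func main() {
	// 0xFF can never start a UTF-8 sequence, so the reader refuses the stream.
	var v interface{}
	err := yaml.Unmarshal([]byte("key: val\xffue\n"), &v)
	fmt.Println(err) // an "invalid leading UTF-8 octet" error

	// Marshalling a non-UTF-8 string takes the !!binary/base64 path instead.
	out, _ := yaml.Marshal(map[string]string{"raw": "\xff\xfe"})
	fmt.Print(string(out)) // raw: !!binary //4=
}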
+ parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. + for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go new file mode 100644 index 00000000..4120e0c9 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go @@ -0,0 +1,258 @@ +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' 
// Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, + {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, + {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, + {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, + {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, + {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, + {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", yaml_MERGE_TAG, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + // TODO This can easily be made faster and produce less garbage. + if strings.HasPrefix(tag, longTagPrefix) { + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out interface{}) { + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: + return + case yaml_FLOAT_TAG: + if rtag == yaml_INT_TAG { + switch v := out.(type) { + case int64: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + case int: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + } + } + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. + + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. 
+ if tag == "" || tag == yaml_TIMESTAMP_TAG { + t, ok := parseTimestamp(in) + if ok { + return yaml_TIMESTAMP_TAG, t + } + } + + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt("-" + plain[3:], 2, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + } + default: + panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") + } + } + return yaml_STR_TAG, in +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. +var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. +} + +// parseTimestamp parses s as a timestamp string and +// returns the timestamp and reports whether it succeeded. +// Timestamp formats are defined at http://yaml.org/type/timestamp.html +func parseTimestamp(s string) (time.Time, bool) { + // TODO write code to check all the formats supported by + // http://yaml.org/type/timestamp.html instead of using time.Parse. + + // Quick check: all date formats start with YYYY-. 
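resolve is what gives unquoted scalars their implicit types: the YAML 1.1 boolean spellings, null, base-10 and base-2 integers, floats and infinities, and the timestamp formats listed above. Quoting a scalar forces it down the !!str path instead. A compact sketch, again assuming the gopkg.in/yaml.v2-compatible surface (import path illustrative):

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2" // assumed equivalent of the vendored goyaml.v2 fork
)

func main() {
	doc := `
- yes
- ~
- 42
- 0b101
- 3.5
- .inf
- 2001-12-15 02:59:43.1
- "42"
`
	var out []interface{}
	if err := yaml.Unmarshal([]byte(doc), &out); err != nil {
		panic(err)
	}
	for _, v := range out {
		fmt.Printf("%-9T %v\n", v, v)
	}
	// Expected, roughly:
	//   bool      true
	//   <nil>     <nil>
	//   int       42
	//   int       5
	//   float64   3.5
	//   float64   +Inf
	//   time.Time 2001-12-15 02:59:43.1 +0000 UTC
	//   string    42
}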
+ i := 0 + for ; i < len(s); i++ { + if c := s[i]; c < '0' || c > '9' { + break + } + } + if i != 4 || i == len(s) || s[i] != '-' { + return time.Time{}, false + } + for _, format := range allowedTimestampFormats { + if t, err := time.Parse(format, s); err == nil { + return t, true + } + } + return time.Time{}, false +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go new file mode 100644 index 00000000..0b9bb603 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go @@ -0,0 +1,2711 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Introduction +// ************ +// +// The following notes assume that you are familiar with the YAML specification +// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in +// some cases we are less restrictive that it requires. +// +// The process of transforming a YAML stream into a sequence of events is +// divided on two steps: Scanning and Parsing. +// +// The Scanner transforms the input stream into a sequence of tokens, while the +// parser transform the sequence of tokens produced by the Scanner into a +// sequence of parsing events. +// +// The Scanner is rather clever and complicated. The Parser, on the contrary, +// is a straightforward implementation of a recursive-descendant parser (or, +// LL(1) parser, as it is usually called). +// +// Actually there are two issues of Scanning that might be called "clever", the +// rest is quite straightforward. The issues are "block collection start" and +// "simple keys". Both issues are explained below in details. +// +// Here the Scanning step is explained and implemented. We start with the list +// of all the tokens produced by the Scanner together with short descriptions. +// +// Now, tokens: +// +// STREAM-START(encoding) # The stream start. +// STREAM-END # The stream end. +// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. +// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. +// DOCUMENT-START # '---' +// DOCUMENT-END # '...' +// BLOCK-SEQUENCE-START # Indentation increase denoting a block +// BLOCK-MAPPING-START # sequence or a block mapping. +// BLOCK-END # Indentation decrease. +// FLOW-SEQUENCE-START # '[' +// FLOW-SEQUENCE-END # ']' +// BLOCK-SEQUENCE-START # '{' +// BLOCK-SEQUENCE-END # '}' +// BLOCK-ENTRY # '-' +// FLOW-ENTRY # ',' +// KEY # '?' or nothing (simple keys). +// VALUE # ':' +// ALIAS(anchor) # '*anchor' +// ANCHOR(anchor) # '&anchor' +// TAG(handle,suffix) # '!handle!suffix' +// SCALAR(value,style) # A scalar. +// +// The following two tokens are "virtual" tokens denoting the beginning and the +// end of the stream: +// +// STREAM-START(encoding) +// STREAM-END +// +// We pass the information about the input stream encoding with the +// STREAM-START token. +// +// The next two tokens are responsible for tags: +// +// VERSION-DIRECTIVE(major,minor) +// TAG-DIRECTIVE(handle,prefix) +// +// Example: +// +// %YAML 1.1 +// %TAG ! !foo +// %TAG !yaml! tag:yaml.org,2002: +// --- +// +// The correspoding sequence of tokens: +// +// STREAM-START(utf-8) +// VERSION-DIRECTIVE(1,1) +// TAG-DIRECTIVE("!","!foo") +// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") +// DOCUMENT-START +// STREAM-END +// +// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole +// line. +// +// The document start and end indicators are represented by: +// +// DOCUMENT-START +// DOCUMENT-END +// +// Note that if a YAML stream contains an implicit document (without '---' +// and '...' 
indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced. +// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. +// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? 
a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator. Note that +// the Scanner still produce the KEY token whenever it encounters a simple key. +// +// For scanning block collections, the following tokens are used (note that we +// repeat KEY and VALUE here): +// +// BLOCK-SEQUENCE-START +// BLOCK-MAPPING-START +// BLOCK-END +// BLOCK-ENTRY +// KEY +// VALUE +// +// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation +// increase that precedes a block collection (cf. the INDENT token in Python). +// The token BLOCK-END denote indentation decrease that ends a block collection +// (cf. the DEDENT token in Python). However YAML has some syntax pecularities +// that makes detections of these tokens more complex. +// +// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators +// '-', '?', and ':' correspondingly. +// +// The following examples show how the tokens BLOCK-SEQUENCE-START, +// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: +// +// 1. Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? 
complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . 
LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + if parser.tokens_head != len(parser.tokens) { + // If queue is non-empty, check if any potential simple key may + // occupy the head position. + head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] + if !ok { + break + } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { + return false + } else if !valid { + break + } + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + // Eat whitespaces and comments until we reach the next token. 
+ if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? 
+ if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { + if !simple_key.possible { + return false, true + } + + // The 1.2 specification says: + // + // "If the ? indicator is omitted, parsing needs to see past the + // implicit key to recognize it as such. To limit the amount of + // lookahead required, the “:” indicator must appear at most 1024 + // Unicode characters beyond the start of the key. In addition, the key + // is restricted to a single line." + // + if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { + // Check if the potential simple key to be removed is required. + if simple_key.required { + return false, yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + return false, true + } + return true, true +} + +// Check if a simple key may start at the current position and add it if +// needed. 
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + } + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1 + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. + if parser.simple_keys[i].required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", parser.simple_keys[i].mark, + "could not find expected ':'") + } + // Remove the key from the stack. + parser.simple_keys[i].possible = false + delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number) + } + return true +} + +// max_flow_level limits the flow_level +const max_flow_level = 10000 + +// Increase the flow level and resize the simple key list if needed. +func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { + // Reset the simple key on the next level. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{ + possible: false, + required: false, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + }) + + // Increase the flow level. + parser.flow_level++ + if parser.flow_level > max_flow_level { + return yaml_parser_set_scanner_error(parser, + "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_flow_level)) + } + return true +} + +// Decrease the flow level. +func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { + if parser.flow_level > 0 { + parser.flow_level-- + last := len(parser.simple_keys) - 1 + delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number) + parser.simple_keys = parser.simple_keys[:last] + } + return true +} + +// max_indents limits the indents stack size +const max_indents = 10000 + +// Push the current indentation level to the stack and set the new level +// the current column is greater than the indentation level. In this case, +// append or insert the specified token into the token queue. +func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + if parser.indent < column { + // Push the current indentation level to the stack and set the new + // indentation level. + parser.indents = append(parser.indents, parser.indent) + parser.indent = column + if len(parser.indents) > max_indents { + return yaml_parser_set_scanner_error(parser, + "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_indents)) + } + + // Create a token and insert it into the queue. 
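yaml_simple_key_is_valid above enforces the restriction quoted from the 1.2 specification: an implicit key's ':' must appear on the same line as the key and within 1024 characters of its start. A minimal standalone sketch of that check, with hypothetical names:

package main

import "fmt"

// mark mirrors the index/line bookkeeping kept in the scanner's marks.
type mark struct{ index, line int }

// simpleKeyStillValid reports whether a possible implicit key that started at
// keyStart may still be completed by a ':' at current. Illustrative only.
func simpleKeyStillValid(keyStart, current mark) bool {
	return keyStart.line == current.line && current.index-keyStart.index <= 1024
}

func main() {
	start := mark{index: 10, line: 3}
	fmt.Println(simpleKeyStillValid(start, mark{index: 200, line: 3}))  // true
	fmt.Println(simpleKeyStillValid(start, mark{index: 200, line: 4}))  // false: key spans lines
	fmt.Println(simpleKeyStillValid(start, mark{index: 2000, line: 3})) // false: ':' too far away
}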
+ token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + // Loop through the indentation levels in the stack. + for parser.indent > column { + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + parser.simple_keys_by_tok = make(map[int]int) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. 
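yaml_parser_roll_indent and yaml_parser_unroll_indent above derive block structure from the indents stack alone: each level popped when the column decreases emits one BLOCK-END token. A simplified self-contained sketch of the unroll step (hypothetical names, not the vendored logic):

package main

import "fmt"

// unrollIndent pops indentation levels greater than column and counts how many
// BLOCK-END tokens that would emit. Simplified sketch of the logic above.
func unrollIndent(indents []int, indent, column int) ([]int, int, int) {
	blockEnds := 0
	for indent > column && len(indents) > 0 {
		indent = indents[len(indents)-1]
		indents = indents[:len(indents)-1]
		blockEnds++
	}
	return indents, indent, blockEnds
}

func main() {
	// A mapping nested at columns 0 and 2; a token back at column 0 closes one level.
	indents, indent, ends := unrollIndent([]int{-1, 0}, 2, 0)
	fmt.Println(indents, indent, ends) // [-1] 0 1
}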
+ start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. 
+ if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. + if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not nessesary simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok { + return false + + } else if valid { + + // Create the KEY token and insert it into the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + delete(parser.simple_keys_by_tok, simple_key.token_number) + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. 
+ token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. 
+// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. 
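The %YAML directive scanning above reads a major and a minor version separated by '.', rejecting any component longer than max_number_length (two digits). A standalone sketch of the same parse over a whole directive line, using a hypothetical helper:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseYAMLDirective parses a "%YAML major.minor" line the way the scanner
// above does, including the two-digit limit per component. Illustrative only.
func parseYAMLDirective(line string) (major, minor int, err error) {
	value := strings.TrimSpace(strings.TrimPrefix(line, "%YAML"))
	parts := strings.SplitN(value, ".", 2)
	if len(parts) != 2 {
		return 0, 0, fmt.Errorf("did not find expected digit or '.' character")
	}
	nums := make([]int, 2)
	for i, p := range parts {
		if len(p) > 2 {
			return 0, 0, fmt.Errorf("found extremely long version number")
		}
		n, convErr := strconv.Atoi(p)
		if convErr != nil {
			return 0, 0, fmt.Errorf("did not find expected version number")
		}
		nums[i] = n
	}
	return nums[0], nums[1], nil
}

func main() {
	major, minor, err := parseYAMLDirective("%YAML 1.1")
	fmt.Println(major, minor, err) // 1 1 <nil>
}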
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. 
+ if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. 
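yaml_parser_scan_tag above accepts three spellings — the verbatim '!<...>' form, the '!handle!suffix' shorthand, and plain '!suffix' (with a lone '!' as a special case) — and normalizes each into a handle plus a suffix. A rough standalone sketch of that split; splitTag is a hypothetical helper and glosses over the URI-escape handling:

package main

import (
	"fmt"
	"strings"
)

// splitTag mirrors how the scanner above separates a tag token into a handle
// and a suffix for the three accepted shapes. Illustrative simplification.
func splitTag(tag string) (handle, suffix string) {
	switch {
	case strings.HasPrefix(tag, "!<") && strings.HasSuffix(tag, ">"):
		return "", tag[2 : len(tag)-1] // verbatim form keeps an empty handle
	case tag == "!":
		return "", "!" // the non-specific '!' tag
	}
	if i := strings.LastIndex(tag, "!"); i > 0 {
		return tag[:i+1], tag[i+1:] // '!handle!suffix'
	}
	return "!", strings.TrimPrefix(tag, "!") // plain '!suffix'
}

func main() {
	for _, t := range []string{"!!str", "!local", "!e!tag", "!<tag:yaml.org,2002:str>", "!"} {
		h, s := splitTag(t)
		fmt.Printf("%-26s handle=%q suffix=%q\n", t, h, s)
	}
}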
+ // [Go] Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. 
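yaml_parser_scan_uri_escapes above converts each %XX escape in a tag URI into one raw octet and checks that consecutive octets form a complete UTF-8 sequence. A simplified standalone sketch of just the decoding half (hypothetical name; the UTF-8 validation is omitted):

package main

import (
	"fmt"
	"strconv"
)

// decodeURIEscapes replaces every %XX escape with the raw octet it encodes.
// Simplified: unlike the scanner above, it does not verify that the escaped
// octets form a well-formed UTF-8 sequence.
func decodeURIEscapes(s string) ([]byte, error) {
	out := make([]byte, 0, len(s))
	for i := 0; i < len(s); {
		if s[i] != '%' {
			out = append(out, s[i])
			i++
			continue
		}
		if i+2 >= len(s) {
			return nil, fmt.Errorf("did not find URI escaped octet")
		}
		octet, err := strconv.ParseUint(s[i+1:i+3], 16, 8)
		if err != nil {
			return nil, fmt.Errorf("did not find URI escaped octet")
		}
		out = append(out, byte(octet))
		i += 3
	}
	return out, nil
}

func main() {
	b, err := decodeURIEscapes("tag:example.com,2000:caf%C3%A9")
	fmt.Println(string(b), err) // tag:example.com,2000:café <nil>
}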
+ if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. 
+ s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' 
&& + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. + if is_z(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. + leading_blanks := false + for !is_blankz(parser.buffer, parser.buffer_pos) { + if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { + // Is is an escaped single quote. + s = append(s, '\'') + skip(parser) + skip(parser) + + } else if single && parser.buffer[parser.buffer_pos] == '\'' { + // It is a right single quote. + break + } else if !single && parser.buffer[parser.buffer_pos] == '"' { + // It is a right double quote. + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { + // It is an escaped line break. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + skip(parser) + skip_line(parser) + leading_blanks = true + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' { + // It is an escape sequence. + code_length := 0 + + // Check the escape character. + switch parser.buffer[parser.buffer_pos+1] { + case '0': + s = append(s, 0) + case 'a': + s = append(s, '\x07') + case 'b': + s = append(s, '\x08') + case 't', '\t': + s = append(s, '\x09') + case 'n': + s = append(s, '\x0A') + case 'v': + s = append(s, '\x0B') + case 'f': + s = append(s, '\x0C') + case 'r': + s = append(s, '\x0D') + case 'e': + s = append(s, '\x1B') + case ' ': + s = append(s, '\x20') + case '"': + s = append(s, '"') + case '\'': + s = append(s, '\'') + case '\\': + s = append(s, '\\') + case 'N': // NEL (#x85) + s = append(s, '\xC2') + s = append(s, '\x85') + case '_': // #xA0 + s = append(s, '\xC2') + s = append(s, '\xA0') + case 'L': // LS (#x2028) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA8') + case 'P': // PS (#x2029) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA9') + case 'x': + code_length = 2 + case 'u': + code_length = 4 + case 'U': + code_length = 8 + default: + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found unknown escape character") + return false + } + + skip(parser) + skip(parser) + + // Consume an arbitrary escape code. + if code_length > 0 { + var value int + + // Scan the character value. + if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { + return false + } + for k := 0; k < code_length; k++ { + if !is_hex(parser.buffer, parser.buffer_pos+k) { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "did not find expected hexdecimal number") + return false + } + value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) + } + + // Check the value and write the character. 
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. + s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' 
&& + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go new file mode 100644 index 00000000..4c45e660 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go @@ -0,0 +1,113 @@ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + return bl + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go new file mode 100644 index 00000000..a2dde608 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go @@ -0,0 +1,26 @@ +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. 
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go new file mode 100644 index 00000000..30813884 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go @@ -0,0 +1,478 @@ +// Package yaml implements YAML support for the Go language. +// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/go-yaml/yaml +// +package yaml + +import ( + "errors" + "fmt" + "io" + "reflect" + "strings" + "sync" +) + +// MapSlice encodes and decodes as a YAML map. +// The order of keys is preserved when encoding and decoding. +type MapSlice []MapItem + +// MapItem is an item in a MapSlice. +type MapItem struct { + Key, Value interface{} +} + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a YAML document. The UnmarshalYAML +// method receives a function that may be called to unmarshal the original +// YAML value into a field or variable. It is safe to call the unmarshal +// function parameter more than once if necessary. +type Unmarshaler interface { + UnmarshalYAML(unmarshal func(interface{}) error) error +} + +// The Marshaler interface may be implemented by types to customize their +// behavior when being marshaled into a YAML document. The returned value +// is marshaled in place of the original value implementing Marshaler. +// +// If an error is returned by MarshalYAML, the marshaling procedure stops +// and returns with the provided error. +type Marshaler interface { + MarshalYAML() (interface{}, error) +} + +// Unmarshal decodes the first document found within the in byte slice +// and assigns decoded values into the out value. +// +// Maps and pointers (to a struct, string, int, etc) are accepted as out +// values. If an internal pointer within a struct is not initialized, +// the yaml package will initialize it if necessary for unmarshalling +// the provided data. The out parameter must not be nil. +// +// The type of the decoded values should be compatible with the respective +// values in out. If one or more values cannot be decoded due to a type +// mismatches, decoding continues partially until the end of the YAML +// content, and a *yaml.TypeError is returned with details for all +// missed values. +// +// Struct fields are only unmarshalled if they are exported (have an +// upper case first letter), and are unmarshalled using the field name +// lowercased as the default key. Custom keys may be defined via the +// "yaml" name in the field tag: the content preceding the first comma +// is used as the key, and the following comma-separated options are +// used to tweak the marshalling process (see Marshal). +// Conflicting names result in a runtime error. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// var t T +// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) +// +// See the documentation of Marshal for the format of tags and a list of +// supported tag options. 
+// +func Unmarshal(in []byte, out interface{}) (err error) { + return unmarshal(in, out, false) +} + +// UnmarshalStrict is like Unmarshal except that any fields that are found +// in the data that do not have corresponding struct members, or mapping +// keys that are duplicates, will result in +// an error. +func UnmarshalStrict(in []byte, out interface{}) (err error) { + return unmarshal(in, out, true) +} + +// A Decoder reads and decodes YAML values from an input stream. +type Decoder struct { + strict bool + parser *parser +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read +// data from r beyond the YAML values requested. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + parser: newParserFromReader(r), + } +} + +// SetStrict sets whether strict decoding behaviour is enabled when +// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict. +func (dec *Decoder) SetStrict(strict bool) { + dec.strict = strict +} + +// Decode reads the next YAML-encoded value from its input +// and stores it in the value pointed to by v. +// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value. +func (dec *Decoder) Decode(v interface{}) (err error) { + d := newDecoder(dec.strict) + defer handleErr(&err) + node := dec.parser.parse() + if node == nil { + return io.EOF + } + out := reflect.ValueOf(v) + if out.Kind() == reflect.Ptr && !out.IsNil() { + out = out.Elem() + } + d.unmarshal(node, out) + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +func unmarshal(in []byte, out interface{}, strict bool) (err error) { + defer handleErr(&err) + d := newDecoder(strict) + p := newParser(in) + defer p.destroy() + node := p.parse() + if node != nil { + v := reflect.ValueOf(out) + if v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + d.unmarshal(node, v) + } + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +// Marshal serializes the value provided into a YAML document. The structure +// of the generated document will reflect the structure of the value itself. +// Maps and pointers (to struct, string, int, etc) are accepted as the in value. +// +// Struct fields are only marshalled if they are exported (have an upper case +// first letter), and are marshalled using the field name lowercased as the +// default key. Custom keys may be defined via the "yaml" name in the field +// tag: the content preceding the first comma is used as the key, and the +// following comma-separated options are used to tweak the marshalling process. +// Conflicting names result in a runtime error. +// +// The field tag format accepted is: +// +// `(...) yaml:"[][,[,]]" (...)` +// +// The following flags are currently supported: +// +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Zero valued structs will be omitted if all their public +// fields are zero, unless they implement an IsZero +// method (see the IsZeroer interface type), in which +// case the field will be excluded if IsZero returns true. +// +// flow Marshal using a flow style (useful for structs, +// sequences and maps). +// +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. 
+// +// In addition, if the key is "-", the field is ignored. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" +// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" +// +func Marshal(in interface{}) (out []byte, err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshalDoc("", reflect.ValueOf(in)) + e.finish() + out = e.out + return +} + +// An Encoder writes YAML values to an output stream. +type Encoder struct { + encoder *encoder +} + +// NewEncoder returns a new encoder that writes to w. +// The Encoder should be closed after use to flush all data +// to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + encoder: newEncoderWithWriter(w), + } +} + +// Encode writes the YAML encoding of v to the stream. +// If multiple items are encoded to the stream, the +// second and subsequent document will be preceded +// with a "---" document separator, but the first will not. +// +// See the documentation for Marshal for details about the conversion of Go +// values to YAML. +func (e *Encoder) Encode(v interface{}) (err error) { + defer handleErr(&err) + e.encoder.marshalDoc("", reflect.ValueOf(v)) + return nil +} + +// Close closes the encoder by writing any remaining data. +// It does not write a stream terminating string "...". +func (e *Encoder) Close() (err error) { + defer handleErr(&err) + e.encoder.finish() + return nil +} + +func handleErr(err *error) { + if v := recover(); v != nil { + if e, ok := v.(yamlError); ok { + *err = e.err + } else { + panic(v) + } + } +} + +type yamlError struct { + err error +} + +func fail(err error) { + panic(yamlError{err}) +} + +func failf(format string, args ...interface{}) { + panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) +} + +// A TypeError is returned by Unmarshal when one or more fields in +// the YAML document cannot be properly decoded into the requested +// types. When this error is returned, the value is still +// unmarshaled partially. +type TypeError struct { + Errors []string +} + +func (e *TypeError) Error() string { + return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int + + // Inline holds the field index if the field is part of an inlined struct. 
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. 
+type IsZeroer interface { + IsZero() bool +} + +func isZero(v reflect.Value) bool { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} + +// FutureLineWrap globally disables line wrapping when encoding long strings. +// This is a temporary and thus deprecated method introduced to faciliate +// migration towards v3, which offers more control of line lengths on +// individual encodings, and has a default matching the behavior introduced +// by this function. +// +// The default formatting of v2 was erroneously changed in v2.3.0 and reverted +// in v2.4.0, at which point this function was introduced to help migration. +func FutureLineWrap() { + disableLineWrapping = true +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go new file mode 100644 index 00000000..f6a9c8e3 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go @@ -0,0 +1,739 @@ +package yaml + +import ( + "fmt" + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. 
+} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota + + yaml_PLAIN_SCALAR_STYLE // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. + yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. +const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. + yaml_STREAM_END_TOKEN // A STREAM-END token. + + yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. + yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. + yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. + yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. + + yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. + yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. + yaml_BLOCK_END_TOKEN // A BLOCK-END token. + + yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. + yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. + yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. + yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. + + yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. + yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. + yaml_KEY_TOKEN // A KEY token. + yaml_VALUE_TOKEN // A VALUE token. + + yaml_ALIAS_TOKEN // An ALIAS token. + yaml_ANCHOR_TOKEN // An ANCHOR token. + yaml_TAG_TOKEN // A TAG token. + yaml_SCALAR_TOKEN // A SCALAR token. 
+) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. 
+) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. 
+type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. +} + +// The states of the parser. +type yaml_parser_state_t int + +const ( + yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota + + yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. + yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. + yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. + yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. + yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. 
+ yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. + yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. + yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. + yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. + yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. + yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. + yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. + yaml_PARSE_END_STATE // Expect nothing. +) + +func (ps yaml_parser_state_t) String() string { + switch ps { + case yaml_PARSE_STREAM_START_STATE: + return "yaml_PARSE_STREAM_START_STATE" + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_START_STATE: + return "yaml_PARSE_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return "yaml_PARSE_DOCUMENT_CONTENT_STATE" + case yaml_PARSE_DOCUMENT_END_STATE: + return "yaml_PARSE_DOCUMENT_END_STATE" + case yaml_PARSE_BLOCK_NODE_STATE: + return "yaml_PARSE_BLOCK_NODE_STATE" + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" + case yaml_PARSE_FLOW_NODE_STATE: + return "yaml_PARSE_FLOW_NODE_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return 
"yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. 
+ yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. +type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_writer io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? + best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. + tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. + scalar_data struct { + value []byte // The scalar value. 
+ multiline bool // Does the scalar contain line breaks? + flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? + block_plain_allowed bool // Can the scalar be expressed in the block plain style? + single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? + block_allowed bool // Can the scalar be expressed in the literal or folded styles? + style yaml_scalar_style_t // The output style. + } + + // Dumper stuff + + opened bool // If the stream was already opened? + closed bool // If the stream was already closed? + + // The information associated with the document nodes. + anchors *struct { + references int // The number of references. + anchor int // The anchor id. + serialized bool // If the node has been emitted? + } + + last_anchor_id int // The last assigned anchor id. + + document *yaml_document_t // The currently emitted document. +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go new file mode 100644 index 00000000..8110ce3c --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go @@ -0,0 +1,173 @@ +package yaml + +const ( + // The size of the input raw buffer. + input_raw_buffer_size = 512 + + // The size of the input buffer. + // It should be possible to decode the whole raw buffer. + input_buffer_size = input_raw_buffer_size * 3 + + // The size of the output buffer. + output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. + output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. + initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'. +func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. 
+func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. +func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/sigs.k8s.io/yaml/yaml.go b/vendor/sigs.k8s.io/yaml/yaml.go index efbc535d..fc10246b 100644 --- a/vendor/sigs.k8s.io/yaml/yaml.go +++ b/vendor/sigs.k8s.io/yaml/yaml.go @@ -1,3 +1,19 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package yaml import ( @@ -8,56 +24,59 @@ import ( "reflect" "strconv" - "gopkg.in/yaml.v2" + "sigs.k8s.io/yaml/goyaml.v2" ) -// Marshal marshals the object into JSON then converts JSON to YAML and returns the -// YAML. -func Marshal(o interface{}) ([]byte, error) { - j, err := json.Marshal(o) +// Marshal marshals obj into JSON using stdlib json.Marshal, and then converts JSON to YAML using JSONToYAML (see that method for more reference) +func Marshal(obj interface{}) ([]byte, error) { + jsonBytes, err := json.Marshal(obj) if err != nil { - return nil, fmt.Errorf("error marshaling into JSON: %v", err) + return nil, fmt.Errorf("error marshaling into JSON: %w", err) } - y, err := JSONToYAML(j) - if err != nil { - return nil, fmt.Errorf("error converting JSON to YAML: %v", err) - } - - return y, nil + return JSONToYAML(jsonBytes) } // JSONOpt is a decoding option for decoding from JSON format. type JSONOpt func(*json.Decoder) *json.Decoder -// Unmarshal converts YAML to JSON then uses JSON to unmarshal into an object, -// optionally configuring the behavior of the JSON unmarshal. -func Unmarshal(y []byte, o interface{}, opts ...JSONOpt) error { - return yamlUnmarshal(y, o, false, opts...) +// Unmarshal first converts the given YAML to JSON, and then unmarshals the JSON into obj. Options for the +// standard library json.Decoder can be optionally specified, e.g. to decode untyped numbers into json.Number instead of float64, or to disallow unknown fields (but for that purpose, see also UnmarshalStrict). obj must be a non-nil pointer. +// +// Important notes about the Unmarshal logic: +// +// - Decoding is case-insensitive, unlike the rest of Kubernetes API machinery, as this is using the stdlib json library. This might be confusing to users. +// - This decodes any number (although it is an integer) into a float64 if the type of obj is unknown, e.g. *map[string]interface{}, *interface{}, or *[]interface{}. This means integers above +/- 2^53 will lose precision when round-tripping. Make a JSONOpt that calls d.UseNumber() to avoid this. +// - Duplicate fields, including in-case-sensitive matches, are ignored in an undefined order. Note that the YAML specification forbids duplicate fields, so this logic is more permissive than it needs to. See UnmarshalStrict for an alternative. +// - Unknown fields, i.e. serialized data that do not map to a field in obj, are ignored. Use d.DisallowUnknownFields() or UnmarshalStrict to override. +// - As per the YAML 1.1 specification, which yaml.v2 used underneath implements, literal 'yes' and 'no' strings without quotation marks will be converted to true/false implicitly. +// - YAML non-string keys, e.g. ints, bools and floats, are converted to strings implicitly during the YAML to JSON conversion process. +// - There are no compatibility guarantees for returned error values. +func Unmarshal(yamlBytes []byte, obj interface{}, opts ...JSONOpt) error { + return unmarshal(yamlBytes, obj, yaml.Unmarshal, opts...) } -// UnmarshalStrict strictly converts YAML to JSON then uses JSON to unmarshal -// into an object, optionally configuring the behavior of the JSON unmarshal. -func UnmarshalStrict(y []byte, o interface{}, opts ...JSONOpt) error { - return yamlUnmarshal(y, o, true, append(opts, DisallowUnknownFields)...) +// UnmarshalStrict is similar to Unmarshal (please read its documentation for reference), with the following exceptions: +// +// - Duplicate fields in an object yield an error. This is according to the YAML specification. 
+// - If obj, or any of its recursive children, is a struct, presence of fields in the serialized data unknown to the struct will yield an error. +func UnmarshalStrict(yamlBytes []byte, obj interface{}, opts ...JSONOpt) error { + return unmarshal(yamlBytes, obj, yaml.UnmarshalStrict, append(opts, DisallowUnknownFields)...) } -// yamlUnmarshal unmarshals the given YAML byte stream into the given interface, +// unmarshal unmarshals the given YAML byte stream into the given interface, // optionally performing the unmarshalling strictly -func yamlUnmarshal(y []byte, o interface{}, strict bool, opts ...JSONOpt) error { - vo := reflect.ValueOf(o) - unmarshalFn := yaml.Unmarshal - if strict { - unmarshalFn = yaml.UnmarshalStrict - } - j, err := yamlToJSON(y, &vo, unmarshalFn) +func unmarshal(yamlBytes []byte, obj interface{}, unmarshalFn func([]byte, interface{}) error, opts ...JSONOpt) error { + jsonTarget := reflect.ValueOf(obj) + + jsonBytes, err := yamlToJSONTarget(yamlBytes, &jsonTarget, unmarshalFn) if err != nil { - return fmt.Errorf("error converting YAML to JSON: %v", err) + return fmt.Errorf("error converting YAML to JSON: %w", err) } - err = jsonUnmarshal(bytes.NewReader(j), o, opts...) + err = jsonUnmarshal(bytes.NewReader(jsonBytes), obj, opts...) if err != nil { - return fmt.Errorf("error unmarshaling JSON: %v", err) + return fmt.Errorf("error unmarshaling JSON: %w", err) } return nil @@ -67,21 +86,26 @@ func yamlUnmarshal(y []byte, o interface{}, strict bool, opts ...JSONOpt) error // object, optionally applying decoder options prior to decoding. We are not // using json.Unmarshal directly as we want the chance to pass in non-default // options. -func jsonUnmarshal(r io.Reader, o interface{}, opts ...JSONOpt) error { - d := json.NewDecoder(r) +func jsonUnmarshal(reader io.Reader, obj interface{}, opts ...JSONOpt) error { + d := json.NewDecoder(reader) for _, opt := range opts { d = opt(d) } - if err := d.Decode(&o); err != nil { + if err := d.Decode(&obj); err != nil { return fmt.Errorf("while decoding JSON: %v", err) } return nil } -// JSONToYAML Converts JSON to YAML. +// JSONToYAML converts JSON to YAML. Notable implementation details: +// +// - Duplicate fields, are case-sensitively ignored in an undefined order. +// - The sequence indentation style is compact, which means that the "- " marker for a YAML sequence will be on the same indentation level as the sequence field name. +// - Unlike Unmarshal, all integers, up to 64 bits, are preserved during this round-trip. func JSONToYAML(j []byte) ([]byte, error) { // Convert the JSON to an object. var jsonObj interface{} + // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the // Go JSON library doesn't try to pick the right number type (int, float, // etc.) when unmarshalling to interface{}, it just picks float64 @@ -93,35 +117,46 @@ func JSONToYAML(j []byte) ([]byte, error) { } // Marshal this object into YAML. - return yaml.Marshal(jsonObj) + yamlBytes, err := yaml.Marshal(jsonObj) + if err != nil { + return nil, err + } + + return yamlBytes, nil } // YAMLToJSON converts YAML to JSON. Since JSON is a subset of YAML, // passing JSON through this method should be a no-op. // -// Things YAML can do that are not supported by JSON: -// * In YAML you can have binary and null keys in your maps. These are invalid -// in JSON. (int and float keys are converted to strings.) -// * Binary data in YAML with the !!binary tag is not supported. 
If you want to -// use binary data with this library, encode the data as base64 as usual but do -// not use the !!binary tag in your YAML. This will ensure the original base64 -// encoded data makes it all the way through to the JSON. +// Some things YAML can do that are not supported by JSON: +// - In YAML you can have binary and null keys in your maps. These are invalid +// in JSON, and therefore int, bool and float keys are converted to strings implicitly. +// - Binary data in YAML with the !!binary tag is not supported. If you want to +// use binary data with this library, encode the data as base64 as usual but do +// not use the !!binary tag in your YAML. This will ensure the original base64 +// encoded data makes it all the way through to the JSON. +// - And more... read the YAML specification for more details. +// +// Notable about the implementation: // -// For strict decoding of YAML, use YAMLToJSONStrict. +// - Duplicate fields are case-sensitively ignored in an undefined order. Note that the YAML specification forbids duplicate fields, so this logic is more permissive than it needs to. See YAMLToJSONStrict for an alternative. +// - As per the YAML 1.1 specification, which yaml.v2 used underneath implements, literal 'yes' and 'no' strings without quotation marks will be converted to true/false implicitly. +// - Unlike Unmarshal, all integers, up to 64 bits, are preserved during this round-trip. +// - There are no compatibility guarantees for returned error values. func YAMLToJSON(y []byte) ([]byte, error) { - return yamlToJSON(y, nil, yaml.Unmarshal) + return yamlToJSONTarget(y, nil, yaml.Unmarshal) } // YAMLToJSONStrict is like YAMLToJSON but enables strict YAML decoding, // returning an error on any duplicate field names. func YAMLToJSONStrict(y []byte) ([]byte, error) { - return yamlToJSON(y, nil, yaml.UnmarshalStrict) + return yamlToJSONTarget(y, nil, yaml.UnmarshalStrict) } -func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, interface{}) error) ([]byte, error) { +func yamlToJSONTarget(yamlBytes []byte, jsonTarget *reflect.Value, unmarshalFn func([]byte, interface{}) error) ([]byte, error) { // Convert the YAML to an object. var yamlObj interface{} - err := yamlUnmarshal(y, &yamlObj) + err := unmarshalFn(yamlBytes, &yamlObj) if err != nil { return nil, err } @@ -136,7 +171,11 @@ func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, } // Convert this object to JSON and return the data. - return json.Marshal(jsonObj) + jsonBytes, err := json.Marshal(jsonObj) + if err != nil { + return nil, err + } + return jsonBytes, nil } func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) { @@ -147,13 +186,13 @@ func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (in // decoding into the value, we're just checking if the ultimate target is a // string. if jsonTarget != nil { - ju, tu, pv := indirect(*jsonTarget, false) + jsonUnmarshaler, textUnmarshaler, pointerValue := indirect(*jsonTarget, false) // We have a JSON or Text Umarshaler at this level, so we can't be trying // to decode into a string. 
- if ju != nil || tu != nil { + if jsonUnmarshaler != nil || textUnmarshaler != nil { jsonTarget = nil } else { - jsonTarget = &pv + jsonTarget = &pointerValue } } @@ -205,7 +244,7 @@ func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (in keyString = "false" } default: - return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v", + return nil, fmt.Errorf("unsupported map key of type: %s, key: %+#v, value: %+#v", reflect.TypeOf(k), k, v) } diff --git a/vendor/sigs.k8s.io/yaml/yaml_go110.go b/vendor/sigs.k8s.io/yaml/yaml_go110.go index ab3e06a2..94abc171 100644 --- a/vendor/sigs.k8s.io/yaml/yaml_go110.go +++ b/vendor/sigs.k8s.io/yaml/yaml_go110.go @@ -1,7 +1,24 @@ // This file contains changes that are only compatible with go 1.10 and onwards. +//go:build go1.10 // +build go1.10 +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package yaml import "encoding/json" diff --git a/vendor/tags.cncf.io/container-device-interface/LICENSE b/vendor/tags.cncf.io/container-device-interface/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/tags.cncf.io/container-device-interface/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/parser/parser.go b/vendor/tags.cncf.io/container-device-interface/pkg/parser/parser.go new file mode 100644 index 00000000..53259895 --- /dev/null +++ b/vendor/tags.cncf.io/container-device-interface/pkg/parser/parser.go @@ -0,0 +1,212 @@ +/* + Copyright © The CDI Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package parser + +import ( + "fmt" + "strings" +) + +// QualifiedName returns the qualified name for a device. 
+// The syntax for a qualified device name is +// +// "&lt;vendor&gt;/&lt;class&gt;=&lt;name&gt;". +// +// A valid vendor and class name may contain the following runes: +// +// 'A'-'Z', 'a'-'z', '0'-'9', '.', '-', '_'. +// +// A valid device name may contain the following runes: +// +// 'A'-'Z', 'a'-'z', '0'-'9', '-', '_', '.', ':' +func QualifiedName(vendor, class, name string) string { + return vendor + "/" + class + "=" + name +} + +// IsQualifiedName tests if a device name is qualified. +func IsQualifiedName(device string) bool { + _, _, _, err := ParseQualifiedName(device) + return err == nil +} + +// ParseQualifiedName splits a qualified name into device vendor, class, +// and name. If the device fails to parse as a qualified name, or if any +// of the split components fail to pass syntax validation, vendor and +// class are returned as empty, together with the verbatim input as the +// name and an error describing the reason for failure. +func ParseQualifiedName(device string) (string, string, string, error) { + vendor, class, name := ParseDevice(device) + + if vendor == "" { + return "", "", device, fmt.Errorf("unqualified device %q, missing vendor", device) + } + if class == "" { + return "", "", device, fmt.Errorf("unqualified device %q, missing class", device) + } + if name == "" { + return "", "", device, fmt.Errorf("unqualified device %q, missing device name", device) + } + + if err := ValidateVendorName(vendor); err != nil { + return "", "", device, fmt.Errorf("invalid device %q: %w", device, err) + } + if err := ValidateClassName(class); err != nil { + return "", "", device, fmt.Errorf("invalid device %q: %w", device, err) + } + if err := ValidateDeviceName(name); err != nil { + return "", "", device, fmt.Errorf("invalid device %q: %w", device, err) + } + + return vendor, class, name, nil +} + +// ParseDevice tries to split a device name into vendor, class, and name. +// If this fails, for instance in the case of unqualified device names, +// ParseDevice returns an empty vendor and class together with name set +// to the verbatim input. +func ParseDevice(device string) (string, string, string) { + if device == "" || device[0] == '/' { + return "", "", device + } + + parts := strings.SplitN(device, "=", 2) + if len(parts) != 2 || parts[0] == "" || parts[1] == "" { + return "", "", device + } + + name := parts[1] + vendor, class := ParseQualifier(parts[0]) + if vendor == "" { + return "", "", device + } + + return vendor, class, name +} + +// ParseQualifier splits a device qualifier into vendor and class. +// The syntax for a device qualifier is +// +// "&lt;vendor&gt;/&lt;class&gt;" +// +// If parsing fails, an empty vendor and the class set to the +// verbatim input is returned. +func ParseQualifier(kind string) (string, string) { + parts := strings.SplitN(kind, "/", 2) + if len(parts) != 2 || parts[0] == "" || parts[1] == "" { + return "", kind + } + return parts[0], parts[1] +} + +// ValidateVendorName checks the validity of a vendor name. +// A vendor name may contain the following ASCII characters: +// - upper- and lowercase letters ('A'-'Z', 'a'-'z') +// - digits ('0'-'9') +// - underscore, dash, and dot ('_', '-', and '.') +func ValidateVendorName(vendor string) error { + err := validateVendorOrClassName(vendor) + if err != nil { + err = fmt.Errorf("invalid vendor. %w", err) + } + return err +} + +// ValidateClassName checks the validity of class name.
+// A class name may contain the following ASCII characters: +// - upper- and lowercase letters ('A'-'Z', 'a'-'z') +// - digits ('0'-'9') +// - underscore, dash, and dot ('_', '-', and '.') +func ValidateClassName(class string) error { + err := validateVendorOrClassName(class) + if err != nil { + err = fmt.Errorf("invalid class. %w", err) + } + return err +} + +// validateVendorOrClassName checks the validity of vendor or class name. +// A name may contain the following ASCII characters: +// - upper- and lowercase letters ('A'-'Z', 'a'-'z') +// - digits ('0'-'9') +// - underscore, dash, and dot ('_', '-', and '.') +func validateVendorOrClassName(name string) error { + if name == "" { + return fmt.Errorf("empty name") + } + if !IsLetter(rune(name[0])) { + return fmt.Errorf("%q, should start with letter", name) + } + for _, c := range string(name[1 : len(name)-1]) { + switch { + case IsAlphaNumeric(c): + case c == '_' || c == '-' || c == '.': + default: + return fmt.Errorf("invalid character '%c' in name %q", + c, name) + } + } + if !IsAlphaNumeric(rune(name[len(name)-1])) { + return fmt.Errorf("%q, should end with a letter or digit", name) + } + + return nil +} + +// ValidateDeviceName checks the validity of a device name. +// A device name may contain the following ASCII characters: +// - upper- and lowercase letters ('A'-'Z', 'a'-'z') +// - digits ('0'-'9') +// - underscore, dash, dot, colon ('_', '-', '.', ':') +func ValidateDeviceName(name string) error { + if name == "" { + return fmt.Errorf("invalid (empty) device name") + } + if !IsAlphaNumeric(rune(name[0])) { + return fmt.Errorf("invalid class %q, should start with a letter or digit", name) + } + if len(name) == 1 { + return nil + } + for _, c := range string(name[1 : len(name)-1]) { + switch { + case IsAlphaNumeric(c): + case c == '_' || c == '-' || c == '.' || c == ':': + default: + return fmt.Errorf("invalid character '%c' in device name %q", + c, name) + } + } + if !IsAlphaNumeric(rune(name[len(name)-1])) { + return fmt.Errorf("invalid name %q, should end with a letter or digit", name) + } + return nil +} + +// IsLetter reports whether the rune is a letter. +func IsLetter(c rune) bool { + return ('A' <= c && c <= 'Z') || ('a' <= c && c <= 'z') +} + +// IsDigit reports whether the rune is a digit. +func IsDigit(c rune) bool { + return '0' <= c && c <= '9' +} + +// IsAlphaNumeric reports whether the rune is a letter or digit. +func IsAlphaNumeric(c rune) bool { + return IsLetter(c) || IsDigit(c) +}
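For orientation, the vendored parser package added above implements the CDI device-naming scheme ("&lt;vendor&gt;/&lt;class&gt;=&lt;name&gt;"). Below is a minimal, illustrative sketch (not part of the patch) of how code importing this dependency might call it. The import path is the one under which the package is vendored here; the device string "nvidia.com/gpu=gpu0" and the main function are made-up for the example.

package main

import (
	"fmt"

	"tags.cncf.io/container-device-interface/pkg/parser"
)

func main() {
	// A fully qualified CDI device name has the form "<vendor>/<class>=<name>".
	// "nvidia.com/gpu=gpu0" is only an illustrative value.
	device := "nvidia.com/gpu=gpu0"

	vendor, class, name, err := parser.ParseQualifiedName(device)
	if err != nil {
		fmt.Println("not a qualified device name:", err)
		return
	}
	fmt.Println(vendor, class, name) // nvidia.com gpu gpu0

	// Unqualified inputs fail to parse, so IsQualifiedName reports false.
	fmt.Println(parser.IsQualifiedName("/dev/null")) // false

	// QualifiedName reassembles the components into the canonical form.
	fmt.Println(parser.QualifiedName(vendor, class, name)) // nvidia.com/gpu=gpu0
}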