diff --git a/vendor.conf b/vendor.conf
index 85fe21f23..665b6debe 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -5,12 +5,13 @@ github.com/opencontainers/selinux 31f70552238c5e017d78c3f1ba65
github.com/tchap/go-patricia 666120de432aea38ab06bd5c818f04f4129882c9 # v2.2.6
# containerd dependencies
-github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
+github.com/beorn7/perks 37c8de3658fcb183f997c4e13e8337516ab753e6 # v1.0.1
github.com/BurntSushi/toml 3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005 # v0.3.1
+github.com/cespare/xxhash/v2 d7df74196a9e781ede915320c11c378c1b2f3a1f # v2.1.1
github.com/containerd/cgroups 7347743e5d1e8500d9f27c8e748e689ed991d92b
github.com/containerd/console 8375c3424e4d7b114e8a90a4a40c8e1b40d1d4e6
-github.com/containerd/containerd e1221e69a824ce9aaca34c5bb603feb2f921b883
-github.com/containerd/continuity f2a389ac0a02ce21c09edd7344677a601970f41c
+github.com/containerd/containerd 01310155947cb6eec37dcae29742a165e56acb4a
+github.com/containerd/continuity 0ec596719c75bfd42908850990acea594b7593ac
github.com/containerd/fifo bda0ff6ed73c67bfb5e62bc9c697f146b7fd7f13
github.com/containerd/go-runc a5c2862aed5e6358b305b0e16bfce58e0549b1cd
github.com/containerd/ttrpc 92c8520ef9f86600c650dd540266a007bf03670f
@@ -18,29 +19,29 @@ github.com/containerd/typeurl a93fcdb778cd272c6e9b3028b2f4
github.com/coreos/go-systemd/v22 2d78030078ef61b3cae27f42ad6d0e46db51b339 # v22.0.0
github.com/cpuguy83/go-md2man 7762f7e404f8416dfa1d9bb6a8c192aa9acb4d19 # v1.0.10
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
-github.com/docker/go-metrics 4ea375f7759c82740c893fc030bc37088d2ec098
+github.com/docker/go-metrics b619b3592b65de4f087d9f16863a7e6ff905973c # v0.0.1
github.com/docker/go-units 519db1ee28dcc9fd2474ae59fca29a810482bfb1 # v0.4.0
github.com/godbus/dbus/v5 37bf87eef99d69c4f1d3528bd66e3a87dc201472 # v5.0.3
-github.com/gogo/googleapis d31c731455cb061f42baff3bda55bad0118b126b # v1.2.0
-github.com/gogo/protobuf ba06b47c162d49f2af050fb4c75bcbc86a159d5c # v1.2.1
-github.com/golang/protobuf aa810b61a9c79d51363740d207bb46cf8e620ed5 # v1.2.0
+github.com/gogo/googleapis 01e0f9cca9b92166042241267ee2a5cdf5cff46c # v1.3.2
+github.com/gogo/protobuf 5628607bb4c51c3157aacc3a50f0ab707582b805 # v1.3.1
+github.com/golang/protobuf d23c5127dc24889085f8ccea5c9d560a57a879d8 # v1.3.3
github.com/google/uuid 0cd6bf5da1e1c83f8b45653022c74f71af0538a4 # v1.1.1
-github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
+github.com/grpc-ecosystem/go-grpc-prometheus c225b8c3b01faf2899099b768856a9e916e5087b # v1.2.0
github.com/hashicorp/golang-lru 7f827b33c0f158ec5dfbba01bb0b14a4541fd81d # v0.5.3
github.com/imdario/mergo 7c29201646fa3de8506f701213473dd407f19646 # v0.3.7
github.com/konsorten/go-windows-terminal-sequences 5c8c8bd35d3832f5d134ae1e1e375b69a4d25242 # v1.0.1
github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c # v1.0.1
github.com/Microsoft/go-winio 6c72808b55902eae4c5943626030429ff20f3b63 # v0.4.14
-github.com/Microsoft/hcsshim b3f49c06ffaeef24d09c6c08ec8ec8425a0303e2 # v0.8.7
+github.com/Microsoft/hcsshim 0b571ac85d7c5842b26d2571de4868634a4c39d7 # v0.8.7-24-g0b571ac8
github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
github.com/opencontainers/image-spec d60099175f88c47cd379c4738d158884749ed235 # v1.0.1
github.com/opencontainers/runc dc9208a3303feef5b3839f4323d9beb36df0a9dd # v1.0.0-rc10
github.com/opencontainers/runtime-spec 29686dbc5559d93fb1ef402eeda3e35c38d75af4 # v1.0.1-59-g29686db
github.com/pkg/errors ba968bfe8b2f7e042a574c888954fccecfa385b4 # v0.8.1
-github.com/prometheus/client_golang f4fb1b73fb099f396a7f0036bf86aa8def4ed823
-github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c
-github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563
-github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd
+github.com/prometheus/client_golang c42bebe5a5cddfc6b28cd639103369d8a75dfa89 # v1.3.0
+github.com/prometheus/client_model d1d2010b5beead3fa1c5f271a5cf626e40b3ad6e # v0.1.0
+github.com/prometheus/common 287d3e634a1e550c9e463dd7e5a75a422c614505 # v0.7.0
+github.com/prometheus/procfs 6d489fc7f1d9cd890a250f3ea3431b1744b9623f # v0.0.8
github.com/russross/blackfriday 05f3235734ad95d0016f6a23902f06461fcf567a # v1.5.2
github.com/sirupsen/logrus 8bdbc7bcc01dcbb8ec23dc8a28e332258d25251f # v1.4.1
github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2
@@ -51,8 +52,8 @@ golang.org/x/net f3200d17e092c607f615320ecaad
golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e
golang.org/x/sys 52ab431487773bc9dd1b0766228b1cf3944126bf
golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
-google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
-google.golang.org/grpc 39e8a7b072a67ca2a75f57fa2e0d50995f5b22f6 # v1.23.1
+google.golang.org/genproto e50cd9704f63023d62cd06a1994b98227fc4d21a
+google.golang.org/grpc f495f5b15ae7ccda3b38c53a1bfcde4c1a58a2bc # v1.27.1
# cgroups dependencies
github.com/cilium/ebpf 60c3aa43f488292fe2ee50fb8b833b383ca8ebbb
diff --git a/vendor/github.com/Microsoft/hcsshim/README.md b/vendor/github.com/Microsoft/hcsshim/README.md
index 15b39181a..3e290e1e5 100644
--- a/vendor/github.com/Microsoft/hcsshim/README.md
+++ b/vendor/github.com/Microsoft/hcsshim/README.md
@@ -2,7 +2,7 @@
[![Build status](https://ci.appveyor.com/api/projects/status/nbcw28mnkqml0loa/branch/master?svg=true)](https://ci.appveyor.com/project/WindowsVirtualization/hcsshim/branch/master)
-This package contains the Golang interface for using the Windows [Host Compute Service](https://blogs.technet.microsoft.com/virtualization/2017/01/27/introducing-the-host-compute-service-hcs/) (HCS) to launch and manage [Windows Containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/about/). It also contains other helpers and functions for managing Windows Containers such as the Golang interface for the Host Network Service (HNS).
+This package contains the Golang interface for using the Windows [Host Compute Service](https://techcommunity.microsoft.com/t5/containers/introducing-the-host-compute-service-hcs/ba-p/382332) (HCS) to launch and manage [Windows Containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/about/). It also contains other helpers and functions for managing Windows Containers such as the Golang interface for the Host Network Service (HNS).
It is primarily used in the [Moby Project](https://github.com/moby/moby), but it can be freely used by other projects as well.
diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.pb.go b/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.pb.go
index e805b3e5f..dcbc2dfe2 100644
--- a/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.pb.go
+++ b/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.pb.go
@@ -101,7 +101,18 @@ type Options struct {
SandboxIsolation Options_SandboxIsolation `protobuf:"varint,6,opt,name=sandbox_isolation,json=sandboxIsolation,proto3,enum=containerd.runhcs.v1.Options_SandboxIsolation" json:"sandbox_isolation,omitempty"`
// boot_files_root_path is the path to the directory containing the LCOW
// kernel and root FS files.
- BootFilesRootPath string `protobuf:"bytes,7,opt,name=boot_files_root_path,json=bootFilesRootPath,proto3" json:"boot_files_root_path,omitempty"`
+ BootFilesRootPath string `protobuf:"bytes,7,opt,name=boot_files_root_path,json=bootFilesRootPath,proto3" json:"boot_files_root_path,omitempty"`
+ // vm_processor_count is the default number of processors to create for the
+ // hypervisor isolated utility vm.
+ //
+ // The platform default if omitted is 2, unless the host only has a single
+ // core in which case it is 1.
+ VmProcessorCount int32 `protobuf:"varint,8,opt,name=vm_processor_count,json=vmProcessorCount,proto3" json:"vm_processor_count,omitempty"`
+ // vm_memory_size_in_mb is the default amount of memory to assign to the
+ // hypervisor isolated utility vm.
+ //
+ // The platform default is 1024MB if omitted.
+ VmMemorySizeInMb int32 `protobuf:"varint,9,opt,name=vm_memory_size_in_mb,json=vmMemorySizeInMb,proto3" json:"vm_memory_size_in_mb,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@@ -200,51 +211,55 @@ func init() {
}
var fileDescriptor_b643df6839c75082 = []byte{
- // 704 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0x4d, 0x6f, 0xda, 0x48,
- 0x18, 0xc6, 0xe1, 0xd3, 0x6f, 0x96, 0xc4, 0x99, 0xe5, 0x80, 0xb2, 0xbb, 0x80, 0xc8, 0x21, 0x89,
- 0x76, 0x63, 0x43, 0xf6, 0xd8, 0x53, 0x09, 0xa0, 0xba, 0x6a, 0x83, 0x65, 0xa2, 0xa6, 0x1f, 0x07,
- 0xcb, 0xd8, 0x83, 0xb1, 0x82, 0x3d, 0xd6, 0xcc, 0x90, 0x86, 0x5b, 0x7f, 0x42, 0x7f, 0x55, 0x95,
- 0x63, 0x8f, 0x95, 0x2a, 0xa5, 0x0d, 0xbf, 0xa4, 0x9a, 0xb1, 0x49, 0xd4, 0x28, 0xea, 0xa5, 0x27,
- 0xc6, 0xcf, 0xf3, 0xbc, 0xcf, 0xfb, 0x29, 0x60, 0x14, 0x84, 0x7c, 0xb6, 0x98, 0xe8, 0x1e, 0x89,
- 0x8c, 0x97, 0xa1, 0x47, 0x09, 0x23, 0x53, 0x6e, 0xcc, 0x3c, 0xc6, 0x66, 0x61, 0x64, 0x78, 0x91,
- 0x6f, 0x78, 0x24, 0xe6, 0x6e, 0x18, 0x63, 0xea, 0x1f, 0x09, 0xec, 0x88, 0x2e, 0xe2, 0x99, 0xc7,
- 0x8e, 0x2e, 0xbb, 0x06, 0x49, 0x78, 0x48, 0x62, 0x66, 0xa4, 0x88, 0x9e, 0x50, 0xc2, 0x09, 0xaa,
- 0xdd, 0xeb, 0xf5, 0x8c, 0xb8, 0xec, 0xee, 0xd6, 0x02, 0x12, 0x10, 0x29, 0x30, 0xc4, 0x2b, 0xd5,
- 0xee, 0x36, 0x03, 0x42, 0x82, 0x39, 0x36, 0xe4, 0xd7, 0x64, 0x31, 0x35, 0x78, 0x18, 0x61, 0xc6,
- 0xdd, 0x28, 0x49, 0x05, 0xed, 0x4f, 0x79, 0x28, 0x8f, 0xd2, 0x2c, 0xa8, 0x06, 0x45, 0x1f, 0x4f,
- 0x16, 0x41, 0x5d, 0x69, 0x29, 0x07, 0x15, 0x3b, 0xfd, 0x40, 0x43, 0x00, 0xf9, 0x70, 0xf8, 0x32,
- 0xc1, 0xf5, 0x8d, 0x96, 0x72, 0xb0, 0x75, 0xbc, 0xaf, 0x3f, 0x56, 0x83, 0x9e, 0x19, 0xe9, 0x7d,
- 0xa1, 0x3f, 0x5b, 0x26, 0xd8, 0x56, 0xfd, 0xf5, 0x13, 0xed, 0x41, 0x95, 0xe2, 0x20, 0x64, 0x9c,
- 0x2e, 0x1d, 0x4a, 0x08, 0xaf, 0xe7, 0x5b, 0xca, 0x81, 0x6a, 0xff, 0xb1, 0x06, 0x6d, 0x42, 0xb8,
- 0x10, 0x31, 0x37, 0xf6, 0x27, 0xe4, 0xca, 0x09, 0x23, 0x37, 0xc0, 0xf5, 0x42, 0x2a, 0xca, 0x40,
- 0x53, 0x60, 0xe8, 0x10, 0xb4, 0xb5, 0x28, 0x99, 0xbb, 0x7c, 0x4a, 0x68, 0x54, 0x2f, 0x4a, 0xdd,
- 0x76, 0x86, 0x5b, 0x19, 0x8c, 0xde, 0xc1, 0xce, 0x9d, 0x1f, 0x23, 0x73, 0x57, 0xd4, 0x57, 0x2f,
- 0xc9, 0x1e, 0xf4, 0x5f, 0xf7, 0x30, 0xce, 0x32, 0xae, 0xa3, 0xec, 0x75, 0xce, 0x3b, 0x04, 0x19,
- 0x50, 0x9b, 0x10, 0xc2, 0x9d, 0x69, 0x38, 0xc7, 0x4c, 0xf6, 0xe4, 0x24, 0x2e, 0x9f, 0xd5, 0xcb,
- 0xb2, 0x96, 0x1d, 0xc1, 0x0d, 0x05, 0x25, 0x3a, 0xb3, 0x5c, 0x3e, 0x6b, 0x1f, 0x82, 0x7a, 0x37,
- 0x1a, 0xa4, 0x42, 0xf1, 0xd4, 0x32, 0xad, 0x81, 0x96, 0x43, 0x15, 0x28, 0x0c, 0xcd, 0x17, 0x03,
- 0x4d, 0x41, 0x65, 0xc8, 0x0f, 0xce, 0xce, 0xb5, 0x8d, 0xb6, 0x01, 0xda, 0xc3, 0x0a, 0xd0, 0x26,
- 0x94, 0x2d, 0x7b, 0x74, 0x32, 0x18, 0x8f, 0xb5, 0x1c, 0xda, 0x02, 0x78, 0xf6, 0xc6, 0x1a, 0xd8,
- 0xaf, 0xcc, 0xf1, 0xc8, 0xd6, 0x94, 0xf6, 0xd7, 0x3c, 0x6c, 0x59, 0x94, 0x78, 0x98, 0xb1, 0x3e,
- 0xe6, 0x6e, 0x38, 0x67, 0xe8, 0x1f, 0x00, 0x39, 0x44, 0x27, 0x76, 0x23, 0x2c, 0x97, 0xaa, 0xda,
- 0xaa, 0x44, 0x4e, 0xdd, 0x08, 0xa3, 0x13, 0x00, 0x8f, 0x62, 0x97, 0x63, 0xdf, 0x71, 0xb9, 0x5c,
- 0xec, 0xe6, 0xf1, 0xae, 0x9e, 0x1e, 0x8c, 0xbe, 0x3e, 0x18, 0xfd, 0x6c, 0x7d, 0x30, 0xbd, 0xca,
- 0xf5, 0x4d, 0x33, 0xf7, 0xf1, 0x5b, 0x53, 0xb1, 0xd5, 0x2c, 0xee, 0x29, 0x47, 0xff, 0x02, 0xba,
- 0xc0, 0x34, 0xc6, 0x73, 0x47, 0x5c, 0x96, 0xd3, 0xed, 0x74, 0x9c, 0x98, 0xc9, 0xd5, 0x16, 0xec,
- 0xed, 0x94, 0x11, 0x0e, 0xdd, 0x4e, 0xe7, 0x94, 0x21, 0x1d, 0xfe, 0x8c, 0x70, 0x44, 0xe8, 0xd2,
- 0xf1, 0x48, 0x14, 0x85, 0xdc, 0x99, 0x2c, 0x39, 0x66, 0x72, 0xc7, 0x05, 0x7b, 0x27, 0xa5, 0x4e,
- 0x24, 0xd3, 0x13, 0x04, 0x1a, 0x42, 0x2b, 0xd3, 0xbf, 0x27, 0xf4, 0x22, 0x8c, 0x03, 0x87, 0x61,
- 0xee, 0x24, 0x34, 0xbc, 0x74, 0x39, 0xce, 0x82, 0x8b, 0x32, 0xf8, 0xef, 0x54, 0x77, 0x9e, 0xca,
- 0xc6, 0x98, 0x5b, 0xa9, 0x28, 0xf5, 0xe9, 0x43, 0xf3, 0x11, 0x1f, 0x36, 0x73, 0x29, 0xf6, 0x33,
- 0x9b, 0x92, 0xb4, 0xf9, 0xeb, 0xa1, 0xcd, 0x58, 0x6a, 0x52, 0x97, 0xff, 0x00, 0x92, 0x74, 0xc0,
- 0x4e, 0xe8, 0xcb, 0x25, 0x57, 0x7b, 0xd5, 0xd5, 0x4d, 0x53, 0xcd, 0xc6, 0x6e, 0xf6, 0x6d, 0x35,
- 0x13, 0x98, 0x3e, 0xda, 0x07, 0x6d, 0xc1, 0x30, 0xfd, 0x69, 0x2c, 0x15, 0x99, 0xa4, 0x2a, 0xf0,
- 0xfb, 0xa1, 0xec, 0x41, 0x19, 0x5f, 0x61, 0x4f, 0x78, 0xaa, 0x62, 0x45, 0x3d, 0x58, 0xdd, 0x34,
- 0x4b, 0x83, 0x2b, 0xec, 0x99, 0x7d, 0xbb, 0x24, 0x28, 0xd3, 0xef, 0xf9, 0xd7, 0xb7, 0x8d, 0xdc,
- 0x97, 0xdb, 0x46, 0xee, 0xc3, 0xaa, 0xa1, 0x5c, 0xaf, 0x1a, 0xca, 0xe7, 0x55, 0x43, 0xf9, 0xbe,
- 0x6a, 0x28, 0x6f, 0x9f, 0xff, 0xfe, 0xdf, 0xcb, 0x93, 0xec, 0xf7, 0x75, 0x6e, 0x52, 0x92, 0x7b,
- 0xff, 0xff, 0x47, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa3, 0x9a, 0x54, 0x17, 0xb5, 0x04, 0x00, 0x00,
+ // 760 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xcb, 0x6e, 0xdb, 0x46,
+ 0x14, 0x15, 0x63, 0xbd, 0x78, 0x53, 0x3b, 0xf4, 0x54, 0x0b, 0xc2, 0x6d, 0x25, 0xc1, 0x59, 0xc4,
+ 0x41, 0x63, 0xd2, 0x4e, 0x97, 0x5d, 0x55, 0x96, 0x8c, 0xb2, 0xa8, 0x6d, 0x82, 0x32, 0x9a, 0x3e,
+ 0x16, 0x03, 0x3e, 0xc6, 0xd4, 0x20, 0x1a, 0x0e, 0x31, 0x33, 0x52, 0xad, 0xac, 0xfa, 0x09, 0xfd,
+ 0x88, 0x7e, 0x8c, 0x97, 0x5d, 0x16, 0x28, 0xe0, 0x36, 0xfa, 0x92, 0x62, 0x86, 0xa4, 0x83, 0x06,
+ 0x41, 0x37, 0x5d, 0x69, 0x78, 0xce, 0x99, 0x73, 0x1f, 0x73, 0x20, 0xb8, 0xca, 0xa9, 0x5a, 0xac,
+ 0x12, 0x2f, 0xe5, 0xcc, 0xbf, 0xa0, 0xa9, 0xe0, 0x92, 0xdf, 0x28, 0x7f, 0x91, 0x4a, 0xb9, 0xa0,
+ 0xcc, 0x4f, 0x59, 0xe6, 0xa7, 0xbc, 0x50, 0x31, 0x2d, 0x88, 0xc8, 0x8e, 0x35, 0x76, 0x2c, 0x56,
+ 0xc5, 0x22, 0x95, 0xc7, 0xeb, 0x53, 0x9f, 0x97, 0x8a, 0xf2, 0x42, 0xfa, 0x15, 0xe2, 0x95, 0x82,
+ 0x2b, 0x8e, 0x06, 0xef, 0xf4, 0x5e, 0x4d, 0xac, 0x4f, 0x0f, 0x06, 0x39, 0xcf, 0xb9, 0x11, 0xf8,
+ 0xfa, 0x54, 0x69, 0x0f, 0x46, 0x39, 0xe7, 0xf9, 0x92, 0xf8, 0xe6, 0x2b, 0x59, 0xdd, 0xf8, 0x8a,
+ 0x32, 0x22, 0x55, 0xcc, 0xca, 0x4a, 0x70, 0xf8, 0x5b, 0x1b, 0x7a, 0x57, 0x55, 0x15, 0x34, 0x80,
+ 0x4e, 0x46, 0x92, 0x55, 0xee, 0x5a, 0x63, 0xeb, 0xa8, 0x1f, 0x55, 0x1f, 0xe8, 0x1c, 0xc0, 0x1c,
+ 0xb0, 0xda, 0x94, 0xc4, 0x7d, 0x34, 0xb6, 0x8e, 0xf6, 0x5e, 0x3e, 0xf3, 0x3e, 0xd4, 0x83, 0x57,
+ 0x1b, 0x79, 0x53, 0xad, 0xbf, 0xde, 0x94, 0x24, 0xb2, 0xb3, 0xe6, 0x88, 0x9e, 0xc2, 0xae, 0x20,
+ 0x39, 0x95, 0x4a, 0x6c, 0xb0, 0xe0, 0x5c, 0xb9, 0x3b, 0x63, 0xeb, 0xc8, 0x8e, 0x3e, 0x6a, 0xc0,
+ 0x88, 0x73, 0xa5, 0x45, 0x32, 0x2e, 0xb2, 0x84, 0xdf, 0x62, 0xca, 0xe2, 0x9c, 0xb8, 0xed, 0x4a,
+ 0x54, 0x83, 0x81, 0xc6, 0xd0, 0x73, 0x70, 0x1a, 0x51, 0xb9, 0x8c, 0xd5, 0x0d, 0x17, 0xcc, 0xed,
+ 0x18, 0xdd, 0x93, 0x1a, 0x0f, 0x6b, 0x18, 0xfd, 0x04, 0xfb, 0x0f, 0x7e, 0x92, 0x2f, 0x63, 0xdd,
+ 0x9f, 0xdb, 0x35, 0x33, 0x78, 0xff, 0x3d, 0xc3, 0xbc, 0xae, 0xd8, 0xdc, 0x8a, 0x9a, 0x9a, 0x0f,
+ 0x08, 0xf2, 0x61, 0x90, 0x70, 0xae, 0xf0, 0x0d, 0x5d, 0x12, 0x69, 0x66, 0xc2, 0x65, 0xac, 0x16,
+ 0x6e, 0xcf, 0xf4, 0xb2, 0xaf, 0xb9, 0x73, 0x4d, 0xe9, 0xc9, 0xc2, 0x58, 0x2d, 0xd0, 0x0b, 0x40,
+ 0x6b, 0x86, 0x4b, 0xc1, 0x53, 0x22, 0x25, 0x17, 0x38, 0xe5, 0xab, 0x42, 0xb9, 0xfd, 0xb1, 0x75,
+ 0xd4, 0x89, 0x9c, 0x35, 0x0b, 0x1b, 0xe2, 0x4c, 0xe3, 0xc8, 0x83, 0xc1, 0x9a, 0x61, 0x46, 0x18,
+ 0x17, 0x1b, 0x2c, 0xe9, 0x1b, 0x82, 0x69, 0x81, 0x59, 0xe2, 0xda, 0x8d, 0xfe, 0xc2, 0x50, 0x73,
+ 0xfa, 0x86, 0x04, 0xc5, 0x45, 0x72, 0xf8, 0x1c, 0xec, 0x87, 0xc5, 0x23, 0x1b, 0x3a, 0x97, 0x61,
+ 0x10, 0xce, 0x9c, 0x16, 0xea, 0x43, 0xfb, 0x3c, 0xf8, 0x76, 0xe6, 0x58, 0xa8, 0x07, 0x3b, 0xb3,
+ 0xeb, 0x57, 0xce, 0xa3, 0x43, 0x1f, 0x9c, 0xf7, 0xe7, 0x43, 0x8f, 0xa1, 0x17, 0x46, 0x57, 0x67,
+ 0xb3, 0xf9, 0xdc, 0x69, 0xa1, 0x3d, 0x80, 0xaf, 0x7f, 0x08, 0x67, 0xd1, 0x77, 0xc1, 0xfc, 0x2a,
+ 0x72, 0xac, 0xc3, 0x3f, 0x77, 0x60, 0xaf, 0x6e, 0x6f, 0x4a, 0x54, 0x4c, 0x97, 0x12, 0x7d, 0x06,
+ 0x60, 0x9e, 0x08, 0x17, 0x31, 0x23, 0x26, 0x32, 0x76, 0x64, 0x1b, 0xe4, 0x32, 0x66, 0x04, 0x9d,
+ 0x01, 0xa4, 0x82, 0xc4, 0x8a, 0x64, 0x38, 0x56, 0x26, 0x36, 0x8f, 0x5f, 0x1e, 0x78, 0x55, 0x1c,
+ 0xbd, 0x26, 0x8e, 0xde, 0x75, 0x13, 0xc7, 0x49, 0xff, 0xee, 0x7e, 0xd4, 0xfa, 0xf5, 0xaf, 0x91,
+ 0x15, 0xd9, 0xf5, 0xbd, 0xaf, 0x14, 0xfa, 0x1c, 0xd0, 0x6b, 0x22, 0x0a, 0xb2, 0xc4, 0x3a, 0xb7,
+ 0xf8, 0xf4, 0xe4, 0x04, 0x17, 0xd2, 0x04, 0xa7, 0x1d, 0x3d, 0xa9, 0x18, 0xed, 0x70, 0x7a, 0x72,
+ 0x72, 0x29, 0x91, 0x07, 0x1f, 0xd7, 0xcb, 0x4a, 0x39, 0x63, 0x54, 0xe1, 0x64, 0xa3, 0x88, 0x34,
+ 0x09, 0x6a, 0x47, 0xfb, 0x15, 0x75, 0x66, 0x98, 0x89, 0x26, 0xd0, 0x39, 0x8c, 0x6b, 0xfd, 0xcf,
+ 0x5c, 0xbc, 0xa6, 0x45, 0x8e, 0x25, 0x51, 0xb8, 0x14, 0x74, 0x1d, 0x2b, 0x52, 0x5f, 0xee, 0x98,
+ 0xcb, 0x9f, 0x56, 0xba, 0x57, 0x95, 0x6c, 0x4e, 0x54, 0x58, 0x89, 0x2a, 0x9f, 0x29, 0x8c, 0x3e,
+ 0xe0, 0x23, 0x17, 0xb1, 0x20, 0x59, 0x6d, 0xd3, 0x35, 0x36, 0x9f, 0xbc, 0x6f, 0x33, 0x37, 0x9a,
+ 0xca, 0xe5, 0x05, 0x40, 0x1d, 0x0c, 0x4c, 0x33, 0x13, 0xa1, 0xdd, 0xc9, 0xee, 0xf6, 0x7e, 0x64,
+ 0xd7, 0x6b, 0x0f, 0xa6, 0x91, 0x5d, 0x0b, 0x82, 0x0c, 0x3d, 0x03, 0x67, 0x25, 0x89, 0xf8, 0xd7,
+ 0x5a, 0xfa, 0xa6, 0xc8, 0xae, 0xc6, 0xdf, 0x2d, 0xe5, 0x29, 0xf4, 0xc8, 0x2d, 0x49, 0xb5, 0xa7,
+ 0xce, 0x8d, 0x3d, 0x81, 0xed, 0xfd, 0xa8, 0x3b, 0xbb, 0x25, 0x69, 0x30, 0x8d, 0xba, 0x9a, 0x0a,
+ 0xb2, 0x49, 0x76, 0xf7, 0x76, 0xd8, 0xfa, 0xe3, 0xed, 0xb0, 0xf5, 0xcb, 0x76, 0x68, 0xdd, 0x6d,
+ 0x87, 0xd6, 0xef, 0xdb, 0xa1, 0xf5, 0xf7, 0x76, 0x68, 0xfd, 0xf8, 0xcd, 0xff, 0xff, 0xf3, 0xfa,
+ 0xb2, 0xfe, 0xfd, 0xbe, 0x95, 0x74, 0xcd, 0xbb, 0x7f, 0xf1, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff,
+ 0xd7, 0x90, 0x00, 0xaf, 0x13, 0x05, 0x00, 0x00,
}
func (m *Options) Marshal() (dAtA []byte, err error) {
@@ -306,6 +321,16 @@ func (m *Options) MarshalTo(dAtA []byte) (int, error) {
i = encodeVarintRunhcs(dAtA, i, uint64(len(m.BootFilesRootPath)))
i += copy(dAtA[i:], m.BootFilesRootPath)
}
+ if m.VmProcessorCount != 0 {
+ dAtA[i] = 0x40
+ i++
+ i = encodeVarintRunhcs(dAtA, i, uint64(m.VmProcessorCount))
+ }
+ if m.VmMemorySizeInMb != 0 {
+ dAtA[i] = 0x48
+ i++
+ i = encodeVarintRunhcs(dAtA, i, uint64(m.VmMemorySizeInMb))
+ }
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
@@ -423,6 +448,12 @@ func (m *Options) Size() (n int) {
if l > 0 {
n += 1 + l + sovRunhcs(uint64(l))
}
+ if m.VmProcessorCount != 0 {
+ n += 1 + sovRunhcs(uint64(m.VmProcessorCount))
+ }
+ if m.VmMemorySizeInMb != 0 {
+ n += 1 + sovRunhcs(uint64(m.VmMemorySizeInMb))
+ }
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
@@ -494,6 +525,8 @@ func (this *Options) String() string {
`SandboxPlatform:` + fmt.Sprintf("%v", this.SandboxPlatform) + `,`,
`SandboxIsolation:` + fmt.Sprintf("%v", this.SandboxIsolation) + `,`,
`BootFilesRootPath:` + fmt.Sprintf("%v", this.BootFilesRootPath) + `,`,
+ `VmProcessorCount:` + fmt.Sprintf("%v", this.VmProcessorCount) + `,`,
+ `VmMemorySizeInMb:` + fmt.Sprintf("%v", this.VmMemorySizeInMb) + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -741,6 +774,44 @@ func (m *Options) Unmarshal(dAtA []byte) error {
}
m.BootFilesRootPath = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VmProcessorCount", wireType)
+ }
+ m.VmProcessorCount = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRunhcs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.VmProcessorCount |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 9:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VmMemorySizeInMb", wireType)
+ }
+ m.VmMemorySizeInMb = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRunhcs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.VmMemorySizeInMb |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
default:
iNdEx = preIndex
skippy, err := skipRunhcs(dAtA[iNdEx:])
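
For reference, the 0x40 and 0x48 key bytes written in MarshalTo above (and matched in the case 8 / case 9 branches of Unmarshal) are just the standard protobuf tag bytes for the two new varint fields; a quick standalone check, not part of the patch:

```go
package main

import "fmt"

func main() {
	// tag byte = (field_number << 3) | wire_type; wire type 0 is varint.
	fmt.Printf("%#x\n", 8<<3|0) // 0x40 -> vm_processor_count (field 8)
	fmt.Printf("%#x\n", 9<<3|0) // 0x48 -> vm_memory_size_in_mb (field 9)
}
```
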
diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.proto b/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.proto
index 9d6852a0a..cde2a8f21 100644
--- a/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.proto
+++ b/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.proto
@@ -46,6 +46,19 @@ message Options {
// boot_files_root_path is the path to the directory containing the LCOW
// kernel and root FS files.
string boot_files_root_path = 7;
+
+ // vm_processor_count is the default number of processors to create for the
+ // hypervisor isolated utility vm.
+ //
+ // The platform default if omitted is 2, unless the host only has a single
+ // core in which case it is 1.
+ int32 vm_processor_count = 8;
+
+ // vm_memory_size_in_mb is the default amount of memory to assign to the
+ // hypervisor isolated utility vm.
+ //
+ // The platform default is 1024MB if omitted.
+ int32 vm_memory_size_in_mb = 9;
}
// ProcessDetails contains additional information about a process. This is the additional
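
For context, a minimal sketch of populating the two new fields from Go. The struct and field names come from the generated options package in this patch; how the options are ultimately handed to the shim is assumed and out of scope here:

```go
package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options"
)

func main() {
	// Both settings only take effect for hypervisor-isolated (utility VM) sandboxes.
	opts := &options.Options{
		VmProcessorCount: 2,    // platform default when omitted: 2 (1 on single-core hosts)
		VmMemorySizeInMb: 1024, // platform default when omitted: 1024 MB
	}
	fmt.Println(opts.String())
}
```
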
diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go
index dd029502e..1213e652a 100644
--- a/vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go
+++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go
@@ -35,7 +35,7 @@ func checkForErrors(methodName string, hr error, resultBuffer *uint16) error {
}
if errorFound {
- returnError := hcserror.New(hr, methodName, result)
+ returnError := new(hr, methodName, result)
logrus.Debugf(returnError.Error()) // HCN errors logged for debugging.
return returnError
}
@@ -43,6 +43,52 @@ func checkForErrors(methodName string, hr error, resultBuffer *uint16) error {
return nil
}
+type ErrorCode uint32
+
+// For common errors, define the error as it is in windows, so we can quickly determine it later
+const (
+ ERROR_NOT_FOUND = 0x490
+ HCN_E_PORT_ALREADY_EXISTS ErrorCode = 0x803b0013
+)
+
+type HcnError struct {
+ *hcserror.HcsError
+ code ErrorCode
+}
+
+func (e *HcnError) Error() string {
+ return e.HcsError.Error()
+}
+
+func CheckErrorWithCode(err error, code ErrorCode) bool {
+ hcnError, ok := err.(*HcnError)
+ if ok {
+ return hcnError.code == code
+ }
+ return false
+}
+
+func IsElementNotFoundError(err error) bool {
+ return CheckErrorWithCode(err, ERROR_NOT_FOUND)
+}
+
+func IsPortAlreadyExistsError(err error) bool {
+ return CheckErrorWithCode(err, HCN_E_PORT_ALREADY_EXISTS)
+}
+
+func new(hr error, title string, rest string) error {
+ err := &HcnError{}
+ hcsError := hcserror.New(hr, title, rest)
+ err.HcsError = hcsError.(*hcserror.HcsError)
+ err.code = ErrorCode(hcserror.Win32FromError(hr))
+ return err
+}
+
+//
+// Note that the below errors are not errors returned by hcn itself;
+// we wish to separate them as they are shim usage errors.
+//
+
// NetworkNotFoundError results from a failed search for a network by Id or Name
type NetworkNotFoundError struct {
NetworkName string
@@ -50,10 +96,10 @@ type NetworkNotFoundError struct {
}
func (e NetworkNotFoundError) Error() string {
- if e.NetworkName == "" {
- return fmt.Sprintf("Network Name %s not found", e.NetworkName)
+ if e.NetworkName != "" {
+ return fmt.Sprintf("Network name %q not found", e.NetworkName)
}
- return fmt.Sprintf("Network Id %s not found", e.NetworkID)
+ return fmt.Sprintf("Network ID %q not found", e.NetworkID)
}
// EndpointNotFoundError results from a failed search for an endpoint by Id or Name
@@ -63,10 +109,10 @@ type EndpointNotFoundError struct {
}
func (e EndpointNotFoundError) Error() string {
- if e.EndpointName == "" {
- return fmt.Sprintf("Endpoint Name %s not found", e.EndpointName)
+ if e.EndpointName != "" {
+ return fmt.Sprintf("Endpoint name %q not found", e.EndpointName)
}
- return fmt.Sprintf("Endpoint Id %s not found", e.EndpointID)
+ return fmt.Sprintf("Endpoint ID %q not found", e.EndpointID)
}
// NamespaceNotFoundError results from a failed search for a namespace by Id
@@ -75,7 +121,7 @@ type NamespaceNotFoundError struct {
}
func (e NamespaceNotFoundError) Error() string {
- return fmt.Sprintf("Namespace %s not found", e.NamespaceID)
+ return fmt.Sprintf("Namespace ID %q not found", e.NamespaceID)
}
// LoadBalancerNotFoundError results from a failed search for a loadbalancer by Id
@@ -84,7 +130,7 @@ type LoadBalancerNotFoundError struct {
}
func (e LoadBalancerNotFoundError) Error() string {
- return fmt.Sprintf("LoadBalancer %s not found", e.LoadBalancerId)
+ return fmt.Sprintf("LoadBalancer %q not found", e.LoadBalancerId)
}
// IsNotFoundError returns a boolean indicating whether the error was caused by
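
A short Windows-only sketch of how the new typed-error helpers are meant to be used; createEndpoint below is a hypothetical stand-in for an HCN call, not an hcn API:

```go
// +build windows

package main

import (
	"log"

	"github.com/Microsoft/hcsshim/hcn"
)

// createEndpoint is a hypothetical placeholder for any HCN call that can race
// with an already-configured port mapping.
func createEndpoint() error { return nil }

func main() {
	if err := createEndpoint(); err != nil {
		switch {
		case hcn.IsPortAlreadyExistsError(err):
			// Benign: the port mapping is already in place.
		case hcn.IsElementNotFoundError(err):
			// The underlying HNS object is gone; recreate it or retry.
		default:
			log.Fatal(err)
		}
	}
}
```
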
diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go
index f99ff8754..22c7cf95f 100644
--- a/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go
+++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go
@@ -247,11 +247,23 @@ func ListNamespacesQuery(query HostComputeQuery) ([]HostComputeNamespace, error)
// GetNamespaceByID returns the Namespace specified by Id.
func GetNamespaceByID(namespaceId string) (*HostComputeNamespace, error) {
- g, err := guid.FromString(namespaceId)
+ hcnQuery := defaultQuery()
+ mapA := map[string]string{"ID": namespaceId}
+ filter, err := json.Marshal(mapA)
if err != nil {
- return nil, errInvalidNamespaceID
+ return nil, err
+ }
+ hcnQuery.Filter = string(filter)
+
+ namespaces, err := ListNamespacesQuery(hcnQuery)
+ if err != nil {
+ return nil, err
}
- return getNamespace(g, defaultQueryJson())
+ if len(namespaces) == 0 {
+ return nil, NamespaceNotFoundError{NamespaceID: namespaceId}
+ }
+
+ return &namespaces[0], err
}
// GetNamespaceEndpointIds returns the endpoints of the Namespace specified by Id.
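
Because lookup now goes through a filtered query, a missing namespace surfaces as the package's NamespaceNotFoundError rather than an invalid-GUID error. A hedged sketch of checking for that (the namespace ID is a made-up example):

```go
// +build windows

package main

import (
	"fmt"
	"log"

	"github.com/Microsoft/hcsshim/hcn"
)

func main() {
	ns, err := hcn.GetNamespaceByID("6f14b1c2-9f2e-4f0a-8a3e-000000000000") // example ID
	if _, notFound := err.(hcn.NamespaceNotFoundError); notFound {
		fmt.Println("namespace does not exist")
		return
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", ns)
}
```
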
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/cgo.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/cgo.go
deleted file mode 100644
index 3669c34aa..000000000
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/cgo.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package hcs
-
-import "C"
-
-// This import is needed to make the library compile as CGO because HCSSHIM
-// only works with CGO due to callbacks from HCS comming back from a C thread
-// which is not supported without CGO. See https://github.com/golang/go/issues/10973
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go
index 6300a7974..67a5f7176 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go
@@ -4,12 +4,9 @@ import (
"context"
"encoding/json"
"errors"
- "os"
- "strconv"
"strings"
"sync"
"syscall"
- "time"
"github.com/Microsoft/hcsshim/internal/cow"
"github.com/Microsoft/hcsshim/internal/log"
@@ -21,27 +18,6 @@ import (
"go.opencensus.io/trace"
)
-// currentContainerStarts is used to limit the number of concurrent container
-// starts.
-var currentContainerStarts containerStarts
-
-type containerStarts struct {
- maxParallel int
- inProgress int
- sync.Mutex
-}
-
-func init() {
- mpsS := os.Getenv("HCSSHIM_MAX_PARALLEL_START")
- if len(mpsS) > 0 {
- mpsI, err := strconv.Atoi(mpsS)
- if err != nil || mpsI < 0 {
- return
- }
- currentContainerStarts.maxParallel = mpsI
- }
-}
-
type System struct {
handleLock sync.RWMutex
handle vmcompute.HcsSystem
@@ -215,32 +191,6 @@ func (computeSystem *System) Start(ctx context.Context) (err error) {
return makeSystemError(computeSystem, operation, "", ErrAlreadyClosed, nil)
}
- // This is a very simple backoff-retry loop to limit the number
- // of parallel container starts if environment variable
- // HCSSHIM_MAX_PARALLEL_START is set to a positive integer.
- // It should generally only be used as a workaround to various
- // platform issues that exist between RS1 and RS4 as of Aug 2018
- if currentContainerStarts.maxParallel > 0 {
- for {
- currentContainerStarts.Lock()
- if currentContainerStarts.inProgress < currentContainerStarts.maxParallel {
- currentContainerStarts.inProgress++
- currentContainerStarts.Unlock()
- break
- }
- if currentContainerStarts.inProgress == currentContainerStarts.maxParallel {
- currentContainerStarts.Unlock()
- time.Sleep(100 * time.Millisecond)
- }
- }
- // Make sure we decrement the count when we are done.
- defer func() {
- currentContainerStarts.Lock()
- currentContainerStarts.inProgress--
- currentContainerStarts.Unlock()
- }()
- }
-
resultJSON, err := vmcompute.HcsStartComputeSystem(ctx, computeSystem.handle, "")
events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemStartCompleted, &timeout.SystemStart)
if err != nil {
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema1/schema1.go b/vendor/github.com/Microsoft/hcsshim/internal/schema1/schema1.go
index fb23617f5..24bb3b46b 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/schema1/schema1.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/schema1/schema1.go
@@ -214,9 +214,10 @@ type MappedVirtualDiskController struct {
// GuestDefinedCapabilities is part of the GuestConnectionInfo returned by a GuestConnection call on a utility VM
type GuestDefinedCapabilities struct {
- NamespaceAddRequestSupported bool `json:",omitempty"`
- SignalProcessSupported bool `json:",omitempty"`
- DumpStacksSupported bool `json:",omitempty"`
+ NamespaceAddRequestSupported bool `json:",omitempty"`
+ SignalProcessSupported bool `json:",omitempty"`
+ DumpStacksSupported bool `json:",omitempty"`
+ DeleteContainerStateSupported bool `json:",omitempty"`
}
// GuestConnectionInfo is the structure of an item returned by a GuestConnection call on a utility VM
diff --git a/vendor/github.com/beorn7/perks/go.mod b/vendor/github.com/beorn7/perks/go.mod
new file mode 100644
index 000000000..c4022f212
--- /dev/null
+++ b/vendor/github.com/beorn7/perks/go.mod
@@ -0,0 +1,3 @@
+module github.com/beorn7/perks
+
+go 1.11
diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go
index f4cabd669..d7d14f8eb 100644
--- a/vendor/github.com/beorn7/perks/quantile/stream.go
+++ b/vendor/github.com/beorn7/perks/quantile/stream.go
@@ -77,15 +77,20 @@ func NewHighBiased(epsilon float64) *Stream {
// is guaranteed to be within (Quantile±Epsilon).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
-func NewTargeted(targets map[float64]float64) *Stream {
+func NewTargeted(targetMap map[float64]float64) *Stream {
+ // Convert map to slice to avoid slow iterations on a map.
+ // ƒ is called on the hot path, so converting the map to a slice
+ // beforehand results in significant CPU savings.
+ targets := targetMapToSlice(targetMap)
+
ƒ := func(s *stream, r float64) float64 {
var m = math.MaxFloat64
var f float64
- for quantile, epsilon := range targets {
- if quantile*s.n <= r {
- f = (2 * epsilon * r) / quantile
+ for _, t := range targets {
+ if t.quantile*s.n <= r {
+ f = (2 * t.epsilon * r) / t.quantile
} else {
- f = (2 * epsilon * (s.n - r)) / (1 - quantile)
+ f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
}
if f < m {
m = f
@@ -96,6 +101,25 @@ func NewTargeted(targets map[float64]float64) *Stream {
return newStream(ƒ)
}
+type target struct {
+ quantile float64
+ epsilon float64
+}
+
+func targetMapToSlice(targetMap map[float64]float64) []target {
+ targets := make([]target, 0, len(targetMap))
+
+ for quantile, epsilon := range targetMap {
+ t := target{
+ quantile: quantile,
+ epsilon: epsilon,
+ }
+ targets = append(targets, t)
+ }
+
+ return targets
+}
+
// Stream computes quantiles for a stream of float64s. It is not thread-safe by
// design. Take care when using across multiple goroutines.
type Stream struct {
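
The NewTargeted API is unchanged by the map-to-slice conversion; only the hot-path iteration gets cheaper. A minimal usage sketch, not part of the patch:

```go
package main

import (
	"fmt"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Keys are the target quantiles, values are the allowed error at each target.
	s := quantile.NewTargeted(map[float64]float64{
		0.50: 0.05,
		0.90: 0.01,
		0.99: 0.001,
	})
	for i := 1; i <= 1000; i++ {
		s.Insert(float64(i))
	}
	fmt.Println(s.Query(0.99)) // roughly 990
}
```
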
diff --git a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt
new file mode 100644
index 000000000..24b53065f
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt
@@ -0,0 +1,22 @@
+Copyright (c) 2016 Caleb Spare
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md
new file mode 100644
index 000000000..2fd8693c2
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/README.md
@@ -0,0 +1,67 @@
+# xxhash
+
+[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
+[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash)
+
+xxhash is a Go implementation of the 64-bit
+[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
+high-quality hashing algorithm that is much faster than anything in the Go
+standard library.
+
+This package provides a straightforward API:
+
+```
+func Sum64(b []byte) uint64
+func Sum64String(s string) uint64
+type Digest struct{ ... }
+ func New() *Digest
+```
+
+The `Digest` type implements hash.Hash64. Its key methods are:
+
+```
+func (*Digest) Write([]byte) (int, error)
+func (*Digest) WriteString(string) (int, error)
+func (*Digest) Sum64() uint64
+```
+
+This implementation provides a fast pure-Go implementation and an even faster
+assembly implementation for amd64.
+
+## Compatibility
+
+This package is in a module and the latest code is in version 2 of the module.
+You need a version of Go with at least "minimal module compatibility" to use
+github.com/cespare/xxhash/v2:
+
+* 1.9.7+ for Go 1.9
+* 1.10.3+ for Go 1.10
+* Go 1.11 or later
+
+I recommend using the latest release of Go.
+
+## Benchmarks
+
+Here are some quick benchmarks comparing the pure-Go and assembly
+implementations of Sum64.
+
+| input size | purego | asm |
+| --- | --- | --- |
+| 5 B | 979.66 MB/s | 1291.17 MB/s |
+| 100 B | 7475.26 MB/s | 7973.40 MB/s |
+| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
+| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
+
+These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
+the following commands under Go 1.11.2:
+
+```
+$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
+$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
+```
+
+## Projects using this package
+
+- [InfluxDB](https://github.com/influxdata/influxdb)
+- [Prometheus](https://github.com/prometheus/prometheus)
+- [FreeCache](https://github.com/coocood/freecache)
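
A small, self-contained example of the API the README describes, covering both the one-shot and streaming forms (illustrative only, not part of the vendored sources):

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing; Sum64String avoids a []byte copy on non-appengine builds.
	fmt.Println(xxhash.Sum64String("hello, world"))

	// Streaming hashing via Digest, which implements hash.Hash64.
	d := xxhash.New()
	d.WriteString("hello, ")
	d.Write([]byte("world"))
	fmt.Println(d.Sum64()) // same value as Sum64String("hello, world")
}
```
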
diff --git a/vendor/github.com/cespare/xxhash/v2/go.mod b/vendor/github.com/cespare/xxhash/v2/go.mod
new file mode 100644
index 000000000..49f67608b
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/go.mod
@@ -0,0 +1,3 @@
+module github.com/cespare/xxhash/v2
+
+go 1.11
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go
new file mode 100644
index 000000000..db0b35fbe
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go
@@ -0,0 +1,236 @@
+// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
+// at http://cyan4973.github.io/xxHash/.
+package xxhash
+
+import (
+ "encoding/binary"
+ "errors"
+ "math/bits"
+)
+
+const (
+ prime1 uint64 = 11400714785074694791
+ prime2 uint64 = 14029467366897019727
+ prime3 uint64 = 1609587929392839161
+ prime4 uint64 = 9650029242287828579
+ prime5 uint64 = 2870177450012600261
+)
+
+// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
+// possible in the Go code is worth a small (but measurable) performance boost
+// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
+// convenience in the Go code in a few places where we need to intentionally
+// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
+// result overflows a uint64).
+var (
+ prime1v = prime1
+ prime2v = prime2
+ prime3v = prime3
+ prime4v = prime4
+ prime5v = prime5
+)
+
+// Digest implements hash.Hash64.
+type Digest struct {
+ v1 uint64
+ v2 uint64
+ v3 uint64
+ v4 uint64
+ total uint64
+ mem [32]byte
+ n int // how much of mem is used
+}
+
+// New creates a new Digest that computes the 64-bit xxHash algorithm.
+func New() *Digest {
+ var d Digest
+ d.Reset()
+ return &d
+}
+
+// Reset clears the Digest's state so that it can be reused.
+func (d *Digest) Reset() {
+ d.v1 = prime1v + prime2
+ d.v2 = prime2
+ d.v3 = 0
+ d.v4 = -prime1v
+ d.total = 0
+ d.n = 0
+}
+
+// Size always returns 8 bytes.
+func (d *Digest) Size() int { return 8 }
+
+// BlockSize always returns 32 bytes.
+func (d *Digest) BlockSize() int { return 32 }
+
+// Write adds more data to d. It always returns len(b), nil.
+func (d *Digest) Write(b []byte) (n int, err error) {
+ n = len(b)
+ d.total += uint64(n)
+
+ if d.n+n < 32 {
+ // This new data doesn't even fill the current block.
+ copy(d.mem[d.n:], b)
+ d.n += n
+ return
+ }
+
+ if d.n > 0 {
+ // Finish off the partial block.
+ copy(d.mem[d.n:], b)
+ d.v1 = round(d.v1, u64(d.mem[0:8]))
+ d.v2 = round(d.v2, u64(d.mem[8:16]))
+ d.v3 = round(d.v3, u64(d.mem[16:24]))
+ d.v4 = round(d.v4, u64(d.mem[24:32]))
+ b = b[32-d.n:]
+ d.n = 0
+ }
+
+ if len(b) >= 32 {
+ // One or more full blocks left.
+ nw := writeBlocks(d, b)
+ b = b[nw:]
+ }
+
+ // Store any remaining partial block.
+ copy(d.mem[:], b)
+ d.n = len(b)
+
+ return
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+func (d *Digest) Sum(b []byte) []byte {
+ s := d.Sum64()
+ return append(
+ b,
+ byte(s>>56),
+ byte(s>>48),
+ byte(s>>40),
+ byte(s>>32),
+ byte(s>>24),
+ byte(s>>16),
+ byte(s>>8),
+ byte(s),
+ )
+}
+
+// Sum64 returns the current hash.
+func (d *Digest) Sum64() uint64 {
+ var h uint64
+
+ if d.total >= 32 {
+ v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
+ h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+ h = mergeRound(h, v1)
+ h = mergeRound(h, v2)
+ h = mergeRound(h, v3)
+ h = mergeRound(h, v4)
+ } else {
+ h = d.v3 + prime5
+ }
+
+ h += d.total
+
+ i, end := 0, d.n
+ for ; i+8 <= end; i += 8 {
+ k1 := round(0, u64(d.mem[i:i+8]))
+ h ^= k1
+ h = rol27(h)*prime1 + prime4
+ }
+ if i+4 <= end {
+ h ^= uint64(u32(d.mem[i:i+4])) * prime1
+ h = rol23(h)*prime2 + prime3
+ i += 4
+ }
+ for i < end {
+ h ^= uint64(d.mem[i]) * prime5
+ h = rol11(h) * prime1
+ i++
+ }
+
+ h ^= h >> 33
+ h *= prime2
+ h ^= h >> 29
+ h *= prime3
+ h ^= h >> 32
+
+ return h
+}
+
+const (
+ magic = "xxh\x06"
+ marshaledSize = len(magic) + 8*5 + 32
+)
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (d *Digest) MarshalBinary() ([]byte, error) {
+ b := make([]byte, 0, marshaledSize)
+ b = append(b, magic...)
+ b = appendUint64(b, d.v1)
+ b = appendUint64(b, d.v2)
+ b = appendUint64(b, d.v3)
+ b = appendUint64(b, d.v4)
+ b = appendUint64(b, d.total)
+ b = append(b, d.mem[:d.n]...)
+ b = b[:len(b)+len(d.mem)-d.n]
+ return b, nil
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+func (d *Digest) UnmarshalBinary(b []byte) error {
+ if len(b) < len(magic) || string(b[:len(magic)]) != magic {
+ return errors.New("xxhash: invalid hash state identifier")
+ }
+ if len(b) != marshaledSize {
+ return errors.New("xxhash: invalid hash state size")
+ }
+ b = b[len(magic):]
+ b, d.v1 = consumeUint64(b)
+ b, d.v2 = consumeUint64(b)
+ b, d.v3 = consumeUint64(b)
+ b, d.v4 = consumeUint64(b)
+ b, d.total = consumeUint64(b)
+ copy(d.mem[:], b)
+ b = b[len(d.mem):]
+ d.n = int(d.total % uint64(len(d.mem)))
+ return nil
+}
+
+func appendUint64(b []byte, x uint64) []byte {
+ var a [8]byte
+ binary.LittleEndian.PutUint64(a[:], x)
+ return append(b, a[:]...)
+}
+
+func consumeUint64(b []byte) ([]byte, uint64) {
+ x := u64(b)
+ return b[8:], x
+}
+
+func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
+func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
+
+func round(acc, input uint64) uint64 {
+ acc += input * prime2
+ acc = rol31(acc)
+ acc *= prime1
+ return acc
+}
+
+func mergeRound(acc, val uint64) uint64 {
+ val = round(0, val)
+ acc ^= val
+ acc = acc*prime1 + prime4
+ return acc
+}
+
+func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) }
+func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) }
+func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
+func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
+func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
+func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
+func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
+func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go
new file mode 100644
index 000000000..ad14b807f
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go
@@ -0,0 +1,13 @@
+// +build !appengine
+// +build gc
+// +build !purego
+
+package xxhash
+
+// Sum64 computes the 64-bit xxHash digest of b.
+//
+//go:noescape
+func Sum64(b []byte) uint64
+
+//go:noescape
+func writeBlocks(d *Digest, b []byte) int
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
new file mode 100644
index 000000000..d580e32ae
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
@@ -0,0 +1,215 @@
+// +build !appengine
+// +build gc
+// +build !purego
+
+#include "textflag.h"
+
+// Register allocation:
+// AX h
+// CX pointer to advance through b
+// DX n
+// BX loop end
+// R8 v1, k1
+// R9 v2
+// R10 v3
+// R11 v4
+// R12 tmp
+// R13 prime1v
+// R14 prime2v
+// R15 prime4v
+
+// round reads from and advances the buffer pointer in CX.
+// It assumes that R13 has prime1v and R14 has prime2v.
+#define round(r) \
+ MOVQ (CX), R12 \
+ ADDQ $8, CX \
+ IMULQ R14, R12 \
+ ADDQ R12, r \
+ ROLQ $31, r \
+ IMULQ R13, r
+
+// mergeRound applies a merge round on the two registers acc and val.
+// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
+#define mergeRound(acc, val) \
+ IMULQ R14, val \
+ ROLQ $31, val \
+ IMULQ R13, val \
+ XORQ val, acc \
+ IMULQ R13, acc \
+ ADDQ R15, acc
+
+// func Sum64(b []byte) uint64
+TEXT ·Sum64(SB), NOSPLIT, $0-32
+ // Load fixed primes.
+ MOVQ ·prime1v(SB), R13
+ MOVQ ·prime2v(SB), R14
+ MOVQ ·prime4v(SB), R15
+
+ // Load slice.
+ MOVQ b_base+0(FP), CX
+ MOVQ b_len+8(FP), DX
+ LEAQ (CX)(DX*1), BX
+
+ // The first loop limit will be len(b)-32.
+ SUBQ $32, BX
+
+ // Check whether we have at least one block.
+ CMPQ DX, $32
+ JLT noBlocks
+
+ // Set up initial state (v1, v2, v3, v4).
+ MOVQ R13, R8
+ ADDQ R14, R8
+ MOVQ R14, R9
+ XORQ R10, R10
+ XORQ R11, R11
+ SUBQ R13, R11
+
+ // Loop until CX > BX.
+blockLoop:
+ round(R8)
+ round(R9)
+ round(R10)
+ round(R11)
+
+ CMPQ CX, BX
+ JLE blockLoop
+
+ MOVQ R8, AX
+ ROLQ $1, AX
+ MOVQ R9, R12
+ ROLQ $7, R12
+ ADDQ R12, AX
+ MOVQ R10, R12
+ ROLQ $12, R12
+ ADDQ R12, AX
+ MOVQ R11, R12
+ ROLQ $18, R12
+ ADDQ R12, AX
+
+ mergeRound(AX, R8)
+ mergeRound(AX, R9)
+ mergeRound(AX, R10)
+ mergeRound(AX, R11)
+
+ JMP afterBlocks
+
+noBlocks:
+ MOVQ ·prime5v(SB), AX
+
+afterBlocks:
+ ADDQ DX, AX
+
+ // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
+ ADDQ $24, BX
+
+ CMPQ CX, BX
+ JG fourByte
+
+wordLoop:
+ // Calculate k1.
+ MOVQ (CX), R8
+ ADDQ $8, CX
+ IMULQ R14, R8
+ ROLQ $31, R8
+ IMULQ R13, R8
+
+ XORQ R8, AX
+ ROLQ $27, AX
+ IMULQ R13, AX
+ ADDQ R15, AX
+
+ CMPQ CX, BX
+ JLE wordLoop
+
+fourByte:
+ ADDQ $4, BX
+ CMPQ CX, BX
+ JG singles
+
+ MOVL (CX), R8
+ ADDQ $4, CX
+ IMULQ R13, R8
+ XORQ R8, AX
+
+ ROLQ $23, AX
+ IMULQ R14, AX
+ ADDQ ·prime3v(SB), AX
+
+singles:
+ ADDQ $4, BX
+ CMPQ CX, BX
+ JGE finalize
+
+singlesLoop:
+ MOVBQZX (CX), R12
+ ADDQ $1, CX
+ IMULQ ·prime5v(SB), R12
+ XORQ R12, AX
+
+ ROLQ $11, AX
+ IMULQ R13, AX
+
+ CMPQ CX, BX
+ JL singlesLoop
+
+finalize:
+ MOVQ AX, R12
+ SHRQ $33, R12
+ XORQ R12, AX
+ IMULQ R14, AX
+ MOVQ AX, R12
+ SHRQ $29, R12
+ XORQ R12, AX
+ IMULQ ·prime3v(SB), AX
+ MOVQ AX, R12
+ SHRQ $32, R12
+ XORQ R12, AX
+
+ MOVQ AX, ret+24(FP)
+ RET
+
+// writeBlocks uses the same registers as above except that it uses AX to store
+// the d pointer.
+
+// func writeBlocks(d *Digest, b []byte) int
+TEXT ·writeBlocks(SB), NOSPLIT, $0-40
+ // Load fixed primes needed for round.
+ MOVQ ·prime1v(SB), R13
+ MOVQ ·prime2v(SB), R14
+
+ // Load slice.
+ MOVQ b_base+8(FP), CX
+ MOVQ b_len+16(FP), DX
+ LEAQ (CX)(DX*1), BX
+ SUBQ $32, BX
+
+ // Load vN from d.
+ MOVQ d+0(FP), AX
+ MOVQ 0(AX), R8 // v1
+ MOVQ 8(AX), R9 // v2
+ MOVQ 16(AX), R10 // v3
+ MOVQ 24(AX), R11 // v4
+
+ // We don't need to check the loop condition here; this function is
+ // always called with at least one block of data to process.
+blockLoop:
+ round(R8)
+ round(R9)
+ round(R10)
+ round(R11)
+
+ CMPQ CX, BX
+ JLE blockLoop
+
+ // Copy vN back to d.
+ MOVQ R8, 0(AX)
+ MOVQ R9, 8(AX)
+ MOVQ R10, 16(AX)
+ MOVQ R11, 24(AX)
+
+ // The number of bytes written is CX minus the old base pointer.
+ SUBQ b_base+8(FP), CX
+ MOVQ CX, ret+32(FP)
+
+ RET
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
new file mode 100644
index 000000000..4a5a82160
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
@@ -0,0 +1,76 @@
+// +build !amd64 appengine !gc purego
+
+package xxhash
+
+// Sum64 computes the 64-bit xxHash digest of b.
+func Sum64(b []byte) uint64 {
+ // A simpler version would be
+ // d := New()
+ // d.Write(b)
+ // return d.Sum64()
+ // but this is faster, particularly for small inputs.
+
+ n := len(b)
+ var h uint64
+
+ if n >= 32 {
+ v1 := prime1v + prime2
+ v2 := prime2
+ v3 := uint64(0)
+ v4 := -prime1v
+ for len(b) >= 32 {
+ v1 = round(v1, u64(b[0:8:len(b)]))
+ v2 = round(v2, u64(b[8:16:len(b)]))
+ v3 = round(v3, u64(b[16:24:len(b)]))
+ v4 = round(v4, u64(b[24:32:len(b)]))
+ b = b[32:len(b):len(b)]
+ }
+ h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+ h = mergeRound(h, v1)
+ h = mergeRound(h, v2)
+ h = mergeRound(h, v3)
+ h = mergeRound(h, v4)
+ } else {
+ h = prime5
+ }
+
+ h += uint64(n)
+
+ i, end := 0, len(b)
+ for ; i+8 <= end; i += 8 {
+ k1 := round(0, u64(b[i:i+8:len(b)]))
+ h ^= k1
+ h = rol27(h)*prime1 + prime4
+ }
+ if i+4 <= end {
+ h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
+ h = rol23(h)*prime2 + prime3
+ i += 4
+ }
+ for ; i < end; i++ {
+ h ^= uint64(b[i]) * prime5
+ h = rol11(h) * prime1
+ }
+
+ h ^= h >> 33
+ h *= prime2
+ h ^= h >> 29
+ h *= prime3
+ h ^= h >> 32
+
+ return h
+}
+
+func writeBlocks(d *Digest, b []byte) int {
+ v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
+ n := len(b)
+ for len(b) >= 32 {
+ v1 = round(v1, u64(b[0:8:len(b)]))
+ v2 = round(v2, u64(b[8:16:len(b)]))
+ v3 = round(v3, u64(b[16:24:len(b)]))
+ v4 = round(v4, u64(b[24:32:len(b)]))
+ b = b[32:len(b):len(b)]
+ }
+ d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
+ return n - len(b)
+}
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
new file mode 100644
index 000000000..fc9bea7a3
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
@@ -0,0 +1,15 @@
+// +build appengine
+
+// This file contains the safe implementations of otherwise unsafe-using code.
+
+package xxhash
+
+// Sum64String computes the 64-bit xxHash digest of s.
+func Sum64String(s string) uint64 {
+ return Sum64([]byte(s))
+}
+
+// WriteString adds more data to d. It always returns len(s), nil.
+func (d *Digest) WriteString(s string) (n int, err error) {
+ return d.Write([]byte(s))
+}
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
new file mode 100644
index 000000000..53bf76efb
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
@@ -0,0 +1,46 @@
+// +build !appengine
+
+// This file encapsulates usage of unsafe.
+// xxhash_safe.go contains the safe implementations.
+
+package xxhash
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// Notes:
+//
+// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
+// for some discussion about these unsafe conversions.
+//
+// In the future it's possible that compiler optimizations will make these
+// unsafe operations unnecessary: https://golang.org/issue/2205.
+//
+// Both of these wrapper functions still incur function call overhead since they
+// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write
+// for strings to squeeze out a bit more speed. Mid-stack inlining should
+// eventually fix this.
+
+// Sum64String computes the 64-bit xxHash digest of s.
+// It may be faster than Sum64([]byte(s)) by avoiding a copy.
+func Sum64String(s string) uint64 {
+ var b []byte
+ bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+ bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
+ bh.Len = len(s)
+ bh.Cap = len(s)
+ return Sum64(b)
+}
+
+// WriteString adds more data to d. It always returns len(s), nil.
+// It may be faster than Write([]byte(s)) by avoiding a copy.
+func (d *Digest) WriteString(s string) (n int, err error) {
+ var b []byte
+ bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+ bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
+ bh.Len = len(s)
+ bh.Cap = len(s)
+ return d.Write(b)
+}
diff --git a/vendor/github.com/containerd/containerd/README.md b/vendor/github.com/containerd/containerd/README.md
index 53d11dfad..68e57a78c 100644
--- a/vendor/github.com/containerd/containerd/README.md
+++ b/vendor/github.com/containerd/containerd/README.md
@@ -265,7 +265,7 @@ __If you are reporting a security issue, please reach out discreetly at security
## Licenses
-The containerd codebase is released under the [Apache 2.0 license](LICENSE.code).
+The containerd codebase is released under the [Apache 2.0 license](LICENSE).
The README.md file, and files in the "docs" folder are licensed under the
Creative Commons Attribution 4.0 International License. You may obtain a
copy of the license, titled CC-BY-4.0, at http://creativecommons.org/licenses/by/4.0/.
diff --git a/vendor/github.com/containerd/containerd/api/events/container.pb.go b/vendor/github.com/containerd/containerd/api/events/container.pb.go
index 2d0d1fa62..0c1e0a939 100644
--- a/vendor/github.com/containerd/containerd/api/events/container.pb.go
+++ b/vendor/github.com/containerd/containerd/api/events/container.pb.go
@@ -11,6 +11,7 @@ import (
types "github.com/gogo/protobuf/types"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
)
@@ -24,7 +25,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type ContainerCreate struct {
ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
@@ -48,7 +49,7 @@ func (m *ContainerCreate) XXX_Marshal(b []byte, deterministic bool) ([]byte, err
return xxx_messageInfo_ContainerCreate.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -88,7 +89,7 @@ func (m *ContainerCreate_Runtime) XXX_Marshal(b []byte, deterministic bool) ([]b
return xxx_messageInfo_ContainerCreate_Runtime.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -130,7 +131,7 @@ func (m *ContainerUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, err
return xxx_messageInfo_ContainerUpdate.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -169,7 +170,7 @@ func (m *ContainerDelete) XXX_Marshal(b []byte, deterministic bool) ([]byte, err
return xxx_messageInfo_ContainerDelete.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -325,7 +326,7 @@ func (m *ContainerDelete) Field(fieldpath []string) (string, bool) {
func (m *ContainerCreate) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -333,42 +334,52 @@ func (m *ContainerCreate) Marshal() (dAtA []byte, err error) {
}
func (m *ContainerCreate) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ContainerCreate) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintContainer(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
- }
- if len(m.Image) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintContainer(dAtA, i, uint64(len(m.Image)))
- i += copy(dAtA[i:], m.Image)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if m.Runtime != nil {
- dAtA[i] = 0x1a
- i++
- i = encodeVarintContainer(dAtA, i, uint64(m.Runtime.Size()))
- n1, err := m.Runtime.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ {
+ size, err := m.Runtime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintContainer(dAtA, i, uint64(size))
}
- i += n1
+ i--
+ dAtA[i] = 0x1a
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Image) > 0 {
+ i -= len(m.Image)
+ copy(dAtA[i:], m.Image)
+ i = encodeVarintContainer(dAtA, i, uint64(len(m.Image)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ID) > 0 {
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintContainer(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ContainerCreate_Runtime) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -376,36 +387,45 @@ func (m *ContainerCreate_Runtime) Marshal() (dAtA []byte, err error) {
}
func (m *ContainerCreate_Runtime) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ContainerCreate_Runtime) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Name) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintContainer(dAtA, i, uint64(len(m.Name)))
- i += copy(dAtA[i:], m.Name)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if m.Options != nil {
- dAtA[i] = 0x12
- i++
- i = encodeVarintContainer(dAtA, i, uint64(m.Options.Size()))
- n2, err := m.Options.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ {
+ size, err := m.Options.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintContainer(dAtA, i, uint64(size))
}
- i += n2
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintContainer(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ContainerUpdate) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -413,55 +433,66 @@ func (m *ContainerUpdate) Marshal() (dAtA []byte, err error) {
}
func (m *ContainerUpdate) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ContainerUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintContainer(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- if len(m.Image) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintContainer(dAtA, i, uint64(len(m.Image)))
- i += copy(dAtA[i:], m.Image)
+ if len(m.SnapshotKey) > 0 {
+ i -= len(m.SnapshotKey)
+ copy(dAtA[i:], m.SnapshotKey)
+ i = encodeVarintContainer(dAtA, i, uint64(len(m.SnapshotKey)))
+ i--
+ dAtA[i] = 0x22
}
if len(m.Labels) > 0 {
- for k, _ := range m.Labels {
- dAtA[i] = 0x1a
- i++
+ for k := range m.Labels {
v := m.Labels[k]
- mapSize := 1 + len(k) + sovContainer(uint64(len(k))) + 1 + len(v) + sovContainer(uint64(len(v)))
- i = encodeVarintContainer(dAtA, i, uint64(mapSize))
- dAtA[i] = 0xa
- i++
- i = encodeVarintContainer(dAtA, i, uint64(len(k)))
- i += copy(dAtA[i:], k)
- dAtA[i] = 0x12
- i++
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
i = encodeVarintContainer(dAtA, i, uint64(len(v)))
- i += copy(dAtA[i:], v)
+ i--
+ dAtA[i] = 0x12
+ i -= len(k)
+ copy(dAtA[i:], k)
+ i = encodeVarintContainer(dAtA, i, uint64(len(k)))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintContainer(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x1a
}
}
- if len(m.SnapshotKey) > 0 {
- dAtA[i] = 0x22
- i++
- i = encodeVarintContainer(dAtA, i, uint64(len(m.SnapshotKey)))
- i += copy(dAtA[i:], m.SnapshotKey)
+ if len(m.Image) > 0 {
+ i -= len(m.Image)
+ copy(dAtA[i:], m.Image)
+ i = encodeVarintContainer(dAtA, i, uint64(len(m.Image)))
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ID) > 0 {
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintContainer(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ContainerDelete) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -469,30 +500,39 @@ func (m *ContainerDelete) Marshal() (dAtA []byte, err error) {
}
func (m *ContainerDelete) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ContainerDelete) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
i = encodeVarintContainer(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func encodeVarintContainer(dAtA []byte, offset int, v uint64) int {
+ offset -= sovContainer(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func (m *ContainerCreate) Size() (n int) {
if m == nil {
@@ -587,14 +627,7 @@ func (m *ContainerDelete) Size() (n int) {
}
func sovContainer(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozContainer(x uint64) (n int) {
return sovContainer(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -1309,6 +1342,7 @@ func (m *ContainerDelete) Unmarshal(dAtA []byte) error {
func skipContainer(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -1340,10 +1374,8 @@ func skipContainer(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -1364,55 +1396,30 @@ func skipContainer(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthContainer
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthContainer
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowContainer
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipContainer(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthContainer
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupContainer
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthContainer
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthContainer = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowContainer = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthContainer = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowContainer = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupContainer = fmt.Errorf("proto: unexpected end of group")
)
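
The regenerated marshalers in the hunk above no longer walk the buffer front-to-back with an advancing `i`; `MarshalToSizedBuffer` starts at `len(dAtA)` and writes fields in reverse field-number order, so every length prefix can be emitted immediately after its payload is known. A minimal, self-contained sketch of that pattern for a hypothetical two-string-field message (the type and helper names here are illustrative, not part of the containerd API):

```go
package main

import (
	"fmt"
	"math/bits"
)

// msg stands in for a generated message with two string fields:
// field 1 (ID, tag 0x0a) and field 2 (Image, tag 0x12).
type msg struct {
	ID    string
	Image string
}

// sov reports how many bytes the varint encoding of v occupies.
func sov(v uint64) int { return (bits.Len64(v|1) + 6) / 7 }

// size mirrors the generated Size(): tag byte + length varint + payload per field.
func (m *msg) size() (n int) {
	if l := len(m.ID); l > 0 {
		n += 1 + l + sov(uint64(l))
	}
	if l := len(m.Image); l > 0 {
		n += 1 + l + sov(uint64(l))
	}
	return n
}

// encodeVarint writes v so that it ends just before offset and returns the
// new, smaller offset, matching the regenerated encodeVarint* helpers.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

// marshalToSizedBuffer fills dAtA back to front: highest field number first,
// payload, then length varint, then the tag byte.
func (m *msg) marshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	if len(m.Image) > 0 {
		i -= len(m.Image)
		copy(dAtA[i:], m.Image)
		i = encodeVarint(dAtA, i, uint64(len(m.Image)))
		i--
		dAtA[i] = 0x12
	}
	if len(m.ID) > 0 {
		i -= len(m.ID)
		copy(dAtA[i:], m.ID)
		i = encodeVarint(dAtA, i, uint64(len(m.ID)))
		i--
		dAtA[i] = 0xa
	}
	return len(dAtA) - i, nil
}

func main() {
	m := &msg{ID: "abc", Image: "docker.io/library/busybox:latest"}
	buf := make([]byte, m.size())
	n, _ := m.marshalToSizedBuffer(buf)
	fmt.Printf("wrote %d of %d bytes: % x\n", n, len(buf), buf)
}
```

Because the buffer is exactly `Size()` bytes and filling starts from the end, a correct marshal always finishes with `i == 0`, which is why the generated functions can simply return `len(dAtA) - i`.
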
diff --git a/vendor/github.com/containerd/containerd/api/events/content.pb.go b/vendor/github.com/containerd/containerd/api/events/content.pb.go
index c6ae8a5f2..959ea72d8 100644
--- a/vendor/github.com/containerd/containerd/api/events/content.pb.go
+++ b/vendor/github.com/containerd/containerd/api/events/content.pb.go
@@ -9,6 +9,7 @@ import (
github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
)
@@ -22,7 +23,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type ContentDelete struct {
Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"`
@@ -44,7 +45,7 @@ func (m *ContentDelete) XXX_Marshal(b []byte, deterministic bool) ([]byte, error
return xxx_messageInfo_ContentDelete.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -106,7 +107,7 @@ func (m *ContentDelete) Field(fieldpath []string) (string, bool) {
func (m *ContentDelete) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -114,30 +115,39 @@ func (m *ContentDelete) Marshal() (dAtA []byte, err error) {
}
func (m *ContentDelete) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ContentDelete) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Digest) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.Digest)
+ copy(dAtA[i:], m.Digest)
i = encodeVarintContent(dAtA, i, uint64(len(m.Digest)))
- i += copy(dAtA[i:], m.Digest)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func encodeVarintContent(dAtA []byte, offset int, v uint64) int {
+ offset -= sovContent(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func (m *ContentDelete) Size() (n int) {
if m == nil {
@@ -156,14 +166,7 @@ func (m *ContentDelete) Size() (n int) {
}
func sovContent(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozContent(x uint64) (n int) {
return sovContent(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -276,6 +279,7 @@ func (m *ContentDelete) Unmarshal(dAtA []byte) error {
func skipContent(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -307,10 +311,8 @@ func skipContent(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -331,55 +333,30 @@ func skipContent(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthContent
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthContent
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowContent
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipContent(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthContent
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupContent
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthContent
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthContent = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowContent = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthContent = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowContent = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupContent = fmt.Errorf("proto: unexpected end of group")
)
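
The `sov*` rewrites above replace the shift-and-count loop with an arithmetic form: `bits.Len64(x|1)` gives the bit length (the `|1` makes zero still cost one byte) and `(bitlen+6)/7` rounds up to 7-bit varint groups. A small standard-library-only check of the equivalence:

```go
package main

import (
	"fmt"
	"math/bits"
)

// sovLoop is the old loop-based varint size, kept only for comparison.
func sovLoop(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

// sovBits is the regenerated form: ceil(bitlen(x)/7), with x|1 so that 0 -> 1 byte.
func sovBits(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

func main() {
	for _, x := range []uint64{0, 1, 127, 128, 16383, 16384, 1<<63 - 1, 1 << 63} {
		fmt.Printf("x=%d loop=%d bits=%d\n", x, sovLoop(x), sovBits(x))
	}
}
```

Both forms agree on the full range, including the 10-byte maximum for values at or above 1<<63.
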
diff --git a/vendor/github.com/containerd/containerd/api/events/image.pb.go b/vendor/github.com/containerd/containerd/api/events/image.pb.go
index e0c58aa49..13f60b017 100644
--- a/vendor/github.com/containerd/containerd/api/events/image.pb.go
+++ b/vendor/github.com/containerd/containerd/api/events/image.pb.go
@@ -9,6 +9,7 @@ import (
github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
)
@@ -22,7 +23,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type ImageCreate struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
@@ -45,7 +46,7 @@ func (m *ImageCreate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_ImageCreate.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -85,7 +86,7 @@ func (m *ImageUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_ImageUpdate.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -124,7 +125,7 @@ func (m *ImageDelete) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_ImageDelete.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -238,7 +239,7 @@ func (m *ImageDelete) Field(fieldpath []string) (string, bool) {
func (m *ImageCreate) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -246,43 +247,52 @@ func (m *ImageCreate) Marshal() (dAtA []byte, err error) {
}
func (m *ImageCreate) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageCreate) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Name) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintImage(dAtA, i, uint64(len(m.Name)))
- i += copy(dAtA[i:], m.Name)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Labels) > 0 {
- for k, _ := range m.Labels {
- dAtA[i] = 0x12
- i++
+ for k := range m.Labels {
v := m.Labels[k]
- mapSize := 1 + len(k) + sovImage(uint64(len(k))) + 1 + len(v) + sovImage(uint64(len(v)))
- i = encodeVarintImage(dAtA, i, uint64(mapSize))
- dAtA[i] = 0xa
- i++
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintImage(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(k)
+ copy(dAtA[i:], k)
i = encodeVarintImage(dAtA, i, uint64(len(k)))
- i += copy(dAtA[i:], k)
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintImage(dAtA, i, uint64(baseI-i))
+ i--
dAtA[i] = 0x12
- i++
- i = encodeVarintImage(dAtA, i, uint64(len(v)))
- i += copy(dAtA[i:], v)
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintImage(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ImageUpdate) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -290,43 +300,52 @@ func (m *ImageUpdate) Marshal() (dAtA []byte, err error) {
}
func (m *ImageUpdate) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Name) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintImage(dAtA, i, uint64(len(m.Name)))
- i += copy(dAtA[i:], m.Name)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Labels) > 0 {
- for k, _ := range m.Labels {
- dAtA[i] = 0x12
- i++
+ for k := range m.Labels {
v := m.Labels[k]
- mapSize := 1 + len(k) + sovImage(uint64(len(k))) + 1 + len(v) + sovImage(uint64(len(v)))
- i = encodeVarintImage(dAtA, i, uint64(mapSize))
- dAtA[i] = 0xa
- i++
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintImage(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(k)
+ copy(dAtA[i:], k)
i = encodeVarintImage(dAtA, i, uint64(len(k)))
- i += copy(dAtA[i:], k)
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintImage(dAtA, i, uint64(baseI-i))
+ i--
dAtA[i] = 0x12
- i++
- i = encodeVarintImage(dAtA, i, uint64(len(v)))
- i += copy(dAtA[i:], v)
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintImage(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ImageDelete) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -334,30 +353,39 @@ func (m *ImageDelete) Marshal() (dAtA []byte, err error) {
}
func (m *ImageDelete) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageDelete) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Name) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
i = encodeVarintImage(dAtA, i, uint64(len(m.Name)))
- i += copy(dAtA[i:], m.Name)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func encodeVarintImage(dAtA []byte, offset int, v uint64) int {
+ offset -= sovImage(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func (m *ImageCreate) Size() (n int) {
if m == nil {
@@ -424,14 +452,7 @@ func (m *ImageDelete) Size() (n int) {
}
func sovImage(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozImage(x uint64) (n int) {
return sovImage(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -1014,6 +1035,7 @@ func (m *ImageDelete) Unmarshal(dAtA []byte) error {
func skipImage(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -1045,10 +1067,8 @@ func skipImage(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -1069,55 +1089,30 @@ func skipImage(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthImage
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthImage
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowImage
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipImage(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthImage
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupImage
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthImage
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthImage = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowImage = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthImage = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowImage = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupImage = fmt.Errorf("proto: unexpected end of group")
)
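
The `Labels` map hunks above use a `baseI` marker instead of precomputing `mapSize`: the entry's value and key are written back to front, and `baseI - i` then yields the synthetic entry message's length for the outer prefix. A hedged sketch of that pattern for a single `string -> string` entry (function names are illustrative; the outer tag 0x12 matches the Labels field number used here):

```go
package main

import (
	"fmt"
	"math/bits"
)

func sov(v uint64) int { return (bits.Len64(v|1) + 6) / 7 }

func encodeVarint(dAtA []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

// marshalMapEntry writes one map<string,string> entry ending at position i
// in dAtA and returns the new i, mirroring the generated Labels loop.
func marshalMapEntry(dAtA []byte, i int, k, v string) int {
	baseI := i // everything between the final i and baseI is this entry
	// value: field 2 of the synthetic entry message
	i -= len(v)
	copy(dAtA[i:], v)
	i = encodeVarint(dAtA, i, uint64(len(v)))
	i--
	dAtA[i] = 0x12
	// key: field 1 of the synthetic entry message
	i -= len(k)
	copy(dAtA[i:], k)
	i = encodeVarint(dAtA, i, uint64(len(k)))
	i--
	dAtA[i] = 0xa
	// outer length prefix and tag, now that baseI-i is known
	i = encodeVarint(dAtA, i, uint64(baseI-i))
	i--
	dAtA[i] = 0x12
	return i
}

func main() {
	buf := make([]byte, 64)
	i := marshalMapEntry(buf, len(buf), "env", "prod")
	fmt.Printf("entry bytes: % x\n", buf[i:])
}
```
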
diff --git a/vendor/github.com/containerd/containerd/api/events/namespace.pb.go b/vendor/github.com/containerd/containerd/api/events/namespace.pb.go
index 84882e573..37c3b78cf 100644
--- a/vendor/github.com/containerd/containerd/api/events/namespace.pb.go
+++ b/vendor/github.com/containerd/containerd/api/events/namespace.pb.go
@@ -9,6 +9,7 @@ import (
github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
)
@@ -22,7 +23,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type NamespaceCreate struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
@@ -45,7 +46,7 @@ func (m *NamespaceCreate) XXX_Marshal(b []byte, deterministic bool) ([]byte, err
return xxx_messageInfo_NamespaceCreate.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -85,7 +86,7 @@ func (m *NamespaceUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, err
return xxx_messageInfo_NamespaceUpdate.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -124,7 +125,7 @@ func (m *NamespaceDelete) XXX_Marshal(b []byte, deterministic bool) ([]byte, err
return xxx_messageInfo_NamespaceDelete.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -238,7 +239,7 @@ func (m *NamespaceDelete) Field(fieldpath []string) (string, bool) {
func (m *NamespaceCreate) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -246,43 +247,52 @@ func (m *NamespaceCreate) Marshal() (dAtA []byte, err error) {
}
func (m *NamespaceCreate) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NamespaceCreate) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Name) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name)))
- i += copy(dAtA[i:], m.Name)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Labels) > 0 {
- for k, _ := range m.Labels {
- dAtA[i] = 0x12
- i++
+ for k := range m.Labels {
v := m.Labels[k]
- mapSize := 1 + len(k) + sovNamespace(uint64(len(k))) + 1 + len(v) + sovNamespace(uint64(len(v)))
- i = encodeVarintNamespace(dAtA, i, uint64(mapSize))
- dAtA[i] = 0xa
- i++
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintNamespace(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(k)
+ copy(dAtA[i:], k)
i = encodeVarintNamespace(dAtA, i, uint64(len(k)))
- i += copy(dAtA[i:], k)
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintNamespace(dAtA, i, uint64(baseI-i))
+ i--
dAtA[i] = 0x12
- i++
- i = encodeVarintNamespace(dAtA, i, uint64(len(v)))
- i += copy(dAtA[i:], v)
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *NamespaceUpdate) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -290,43 +300,52 @@ func (m *NamespaceUpdate) Marshal() (dAtA []byte, err error) {
}
func (m *NamespaceUpdate) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NamespaceUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Name) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name)))
- i += copy(dAtA[i:], m.Name)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Labels) > 0 {
- for k, _ := range m.Labels {
- dAtA[i] = 0x12
- i++
+ for k := range m.Labels {
v := m.Labels[k]
- mapSize := 1 + len(k) + sovNamespace(uint64(len(k))) + 1 + len(v) + sovNamespace(uint64(len(v)))
- i = encodeVarintNamespace(dAtA, i, uint64(mapSize))
- dAtA[i] = 0xa
- i++
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintNamespace(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(k)
+ copy(dAtA[i:], k)
i = encodeVarintNamespace(dAtA, i, uint64(len(k)))
- i += copy(dAtA[i:], k)
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintNamespace(dAtA, i, uint64(baseI-i))
+ i--
dAtA[i] = 0x12
- i++
- i = encodeVarintNamespace(dAtA, i, uint64(len(v)))
- i += copy(dAtA[i:], v)
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *NamespaceDelete) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -334,30 +353,39 @@ func (m *NamespaceDelete) Marshal() (dAtA []byte, err error) {
}
func (m *NamespaceDelete) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NamespaceDelete) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Name) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name)))
- i += copy(dAtA[i:], m.Name)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func encodeVarintNamespace(dAtA []byte, offset int, v uint64) int {
+ offset -= sovNamespace(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func (m *NamespaceCreate) Size() (n int) {
if m == nil {
@@ -424,14 +452,7 @@ func (m *NamespaceDelete) Size() (n int) {
}
func sovNamespace(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozNamespace(x uint64) (n int) {
return sovNamespace(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -1014,6 +1035,7 @@ func (m *NamespaceDelete) Unmarshal(dAtA []byte) error {
func skipNamespace(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -1045,10 +1067,8 @@ func skipNamespace(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -1069,55 +1089,30 @@ func skipNamespace(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthNamespace
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthNamespace
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowNamespace
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipNamespace(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthNamespace
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupNamespace
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthNamespace
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthNamespace = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowNamespace = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthNamespace = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowNamespace = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupNamespace = fmt.Errorf("proto: unexpected end of group")
)
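
The `skip*` rewrites above drop the per-wire-type early returns and the recursive group handling in favor of a `depth` counter: a start-group (wire type 3) increments it, an end-group (4) decrements it, and the function only returns once depth is back to zero, so deeply nested or unterminated groups no longer recurse and are rejected cleanly. A simplified sketch of that control flow; for brevity it assumes single-byte tags and lengths, which the real generated code does not:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

var errUnexpectedGroup = errors.New("proto: unexpected end of group")

// skip returns how many bytes the field starting at dAtA[0] occupies,
// tracking group nesting with a depth counter instead of recursing.
func skip(dAtA []byte) (int, error) {
	l := len(dAtA)
	iNdEx := 0
	depth := 0
	for iNdEx < l {
		wireType := int(dAtA[iNdEx] & 0x7) // assume single-byte tags
		iNdEx++
		switch wireType {
		case 0: // varint: consume continuation bytes
			for {
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				iNdEx++
				if dAtA[iNdEx-1] < 0x80 {
					break
				}
			}
		case 1: // fixed64
			iNdEx += 8
		case 2: // length-delimited (assume single-byte length)
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			iNdEx += 1 + int(dAtA[iNdEx])
		case 3: // start group
			depth++
		case 4: // end group
			if depth == 0 {
				return 0, errUnexpectedGroup
			}
			depth--
		case 5: // fixed32
			iNdEx += 4
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
		}
		if depth == 0 {
			return iNdEx, nil
		}
	}
	return 0, io.ErrUnexpectedEOF
}

func main() {
	// field 1, varint wire type, value 300 (0xac 0x02), then trailing data
	n, err := skip([]byte{0x08, 0xac, 0x02, 0xff})
	fmt.Println(n, err) // 3 <nil>
}
```
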
diff --git a/vendor/github.com/containerd/containerd/api/events/snapshot.pb.go b/vendor/github.com/containerd/containerd/api/events/snapshot.pb.go
index 0dbdfdbd6..539297004 100644
--- a/vendor/github.com/containerd/containerd/api/events/snapshot.pb.go
+++ b/vendor/github.com/containerd/containerd/api/events/snapshot.pb.go
@@ -8,6 +8,7 @@ import (
proto "github.com/gogo/protobuf/proto"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
)
@@ -21,7 +22,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type SnapshotPrepare struct {
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
@@ -44,7 +45,7 @@ func (m *SnapshotPrepare) XXX_Marshal(b []byte, deterministic bool) ([]byte, err
return xxx_messageInfo_SnapshotPrepare.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -84,7 +85,7 @@ func (m *SnapshotCommit) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro
return xxx_messageInfo_SnapshotCommit.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -123,7 +124,7 @@ func (m *SnapshotRemove) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro
return xxx_messageInfo_SnapshotRemove.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -219,7 +220,7 @@ func (m *SnapshotRemove) Field(fieldpath []string) (string, bool) {
func (m *SnapshotPrepare) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -227,32 +228,40 @@ func (m *SnapshotPrepare) Marshal() (dAtA []byte, err error) {
}
func (m *SnapshotPrepare) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SnapshotPrepare) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Key) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Key)))
- i += copy(dAtA[i:], m.Key)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Parent) > 0 {
- dAtA[i] = 0x12
- i++
+ i -= len(m.Parent)
+ copy(dAtA[i:], m.Parent)
i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Parent)))
- i += copy(dAtA[i:], m.Parent)
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Key) > 0 {
+ i -= len(m.Key)
+ copy(dAtA[i:], m.Key)
+ i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Key)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *SnapshotCommit) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -260,32 +269,40 @@ func (m *SnapshotCommit) Marshal() (dAtA []byte, err error) {
}
func (m *SnapshotCommit) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SnapshotCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Key) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Key)))
- i += copy(dAtA[i:], m.Key)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Name) > 0 {
- dAtA[i] = 0x12
- i++
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Name)))
- i += copy(dAtA[i:], m.Name)
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Key) > 0 {
+ i -= len(m.Key)
+ copy(dAtA[i:], m.Key)
+ i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Key)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *SnapshotRemove) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -293,30 +310,39 @@ func (m *SnapshotRemove) Marshal() (dAtA []byte, err error) {
}
func (m *SnapshotRemove) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SnapshotRemove) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Key) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.Key)
+ copy(dAtA[i:], m.Key)
i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Key)))
- i += copy(dAtA[i:], m.Key)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func encodeVarintSnapshot(dAtA []byte, offset int, v uint64) int {
+ offset -= sovSnapshot(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func (m *SnapshotPrepare) Size() (n int) {
if m == nil {
@@ -375,14 +401,7 @@ func (m *SnapshotRemove) Size() (n int) {
}
func sovSnapshot(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozSnapshot(x uint64) (n int) {
return sovSnapshot(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -755,6 +774,7 @@ func (m *SnapshotRemove) Unmarshal(dAtA []byte) error {
func skipSnapshot(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -786,10 +806,8 @@ func skipSnapshot(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -810,55 +828,30 @@ func skipSnapshot(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthSnapshot
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthSnapshot
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowSnapshot
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipSnapshot(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthSnapshot
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupSnapshot
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthSnapshot
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthSnapshot = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowSnapshot = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthSnapshot = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowSnapshot = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupSnapshot = fmt.Errorf("proto: unexpected end of group")
)
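
For nested messages such as `TaskCreate.IO` in the hunks below, the reverse marshaler hands the child the unused prefix of the buffer, `dAtA[:i]`, subtracts the size the child reports, and only then writes the length varint and tag. A hedged, self-contained sketch of that calling convention with placeholder types (field number 4, tag 0x22, chosen to mirror the IO field):

```go
package main

import (
	"fmt"
	"math/bits"
)

func sov(v uint64) int { return (bits.Len64(v|1) + 6) / 7 }

func encodeVarint(dAtA []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

// child stands in for an embedded message like TaskIO (Stdin as field 1).
type child struct{ Stdin string }

func (c *child) size() int {
	if l := len(c.Stdin); l > 0 {
		return 1 + l + sov(uint64(l))
	}
	return 0
}

func (c *child) marshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	if len(c.Stdin) > 0 {
		i -= len(c.Stdin)
		copy(dAtA[i:], c.Stdin)
		i = encodeVarint(dAtA, i, uint64(len(c.Stdin)))
		i--
		dAtA[i] = 0xa
	}
	return len(dAtA) - i, nil
}

// parent embeds child as field 4 (tag 0x22), mirroring TaskCreate.IO.
type parent struct{ IO *child }

func (p *parent) size() int {
	if p.IO != nil {
		l := p.IO.size()
		return 1 + l + sov(uint64(l))
	}
	return 0
}

func (p *parent) marshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	if p.IO != nil {
		// marshal the child into the space left of i, then prefix its size
		size, err := p.IO.marshalToSizedBuffer(dAtA[:i])
		if err != nil {
			return 0, err
		}
		i -= size
		i = encodeVarint(dAtA, i, uint64(size))
		i--
		dAtA[i] = 0x22
	}
	return len(dAtA) - i, nil
}

func main() {
	p := &parent{IO: &child{Stdin: "/dev/null"}}
	buf := make([]byte, p.size())
	n, _ := p.marshalToSizedBuffer(buf)
	fmt.Printf("wrote %d bytes: % x\n", n, buf)
}
```

Writing the child first and the prefix second is what removes the separate `m.IO.Size()` call the old forward-order code needed before it could emit the length.
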
diff --git a/vendor/github.com/containerd/containerd/api/events/task.pb.go b/vendor/github.com/containerd/containerd/api/events/task.pb.go
index fb653d028..0f16695e3 100644
--- a/vendor/github.com/containerd/containerd/api/events/task.pb.go
+++ b/vendor/github.com/containerd/containerd/api/events/task.pb.go
@@ -11,6 +11,7 @@ import (
github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
time "time"
@@ -26,7 +27,7 @@ var _ = time.Kitchen
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type TaskCreate struct {
ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
@@ -53,7 +54,7 @@ func (m *TaskCreate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_TaskCreate.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -93,7 +94,7 @@ func (m *TaskStart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_TaskStart.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -138,7 +139,7 @@ func (m *TaskDelete) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_TaskDelete.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -180,7 +181,7 @@ func (m *TaskIO) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_TaskIO.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -223,7 +224,7 @@ func (m *TaskExit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_TaskExit.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -262,7 +263,7 @@ func (m *TaskOOM) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_TaskOOM.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -302,7 +303,7 @@ func (m *TaskExecAdded) XXX_Marshal(b []byte, deterministic bool) ([]byte, error
return xxx_messageInfo_TaskExecAdded.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -343,7 +344,7 @@ func (m *TaskExecStarted) XXX_Marshal(b []byte, deterministic bool) ([]byte, err
return xxx_messageInfo_TaskExecStarted.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -382,7 +383,7 @@ func (m *TaskPaused) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_TaskPaused.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -421,7 +422,7 @@ func (m *TaskResumed) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_TaskResumed.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -461,7 +462,7 @@ func (m *TaskCheckpointed) XXX_Marshal(b []byte, deterministic bool) ([]byte, er
return xxx_messageInfo_TaskCheckpointed.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -741,7 +742,7 @@ func (m *TaskCheckpointed) Field(fieldpath []string) (string, bool) {
func (m *TaskCreate) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -749,65 +750,78 @@ func (m *TaskCreate) Marshal() (dAtA []byte, err error) {
}
func (m *TaskCreate) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TaskCreate) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ContainerID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
- i += copy(dAtA[i:], m.ContainerID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- if len(m.Bundle) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintTask(dAtA, i, uint64(len(m.Bundle)))
- i += copy(dAtA[i:], m.Bundle)
+ if m.Pid != 0 {
+ i = encodeVarintTask(dAtA, i, uint64(m.Pid))
+ i--
+ dAtA[i] = 0x30
}
- if len(m.Rootfs) > 0 {
- for _, msg := range m.Rootfs {
- dAtA[i] = 0x1a
- i++
- i = encodeVarintTask(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
+ if len(m.Checkpoint) > 0 {
+ i -= len(m.Checkpoint)
+ copy(dAtA[i:], m.Checkpoint)
+ i = encodeVarintTask(dAtA, i, uint64(len(m.Checkpoint)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.IO != nil {
+ {
+ size, err := m.IO.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
- i += n
+ i -= size
+ i = encodeVarintTask(dAtA, i, uint64(size))
}
- }
- if m.IO != nil {
+ i--
dAtA[i] = 0x22
- i++
- i = encodeVarintTask(dAtA, i, uint64(m.IO.Size()))
- n1, err := m.IO.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n1
}
- if len(m.Checkpoint) > 0 {
- dAtA[i] = 0x2a
- i++
- i = encodeVarintTask(dAtA, i, uint64(len(m.Checkpoint)))
- i += copy(dAtA[i:], m.Checkpoint)
+ if len(m.Rootfs) > 0 {
+ for iNdEx := len(m.Rootfs) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Rootfs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTask(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
}
- if m.Pid != 0 {
- dAtA[i] = 0x30
- i++
- i = encodeVarintTask(dAtA, i, uint64(m.Pid))
+ if len(m.Bundle) > 0 {
+ i -= len(m.Bundle)
+ copy(dAtA[i:], m.Bundle)
+ i = encodeVarintTask(dAtA, i, uint64(len(m.Bundle)))
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ContainerID) > 0 {
+ i -= len(m.ContainerID)
+ copy(dAtA[i:], m.ContainerID)
+ i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *TaskStart) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -815,31 +829,38 @@ func (m *TaskStart) Marshal() (dAtA []byte, err error) {
}
func (m *TaskStart) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TaskStart) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ContainerID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
- i += copy(dAtA[i:], m.ContainerID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if m.Pid != 0 {
- dAtA[i] = 0x10
- i++
i = encodeVarintTask(dAtA, i, uint64(m.Pid))
+ i--
+ dAtA[i] = 0x10
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ContainerID) > 0 {
+ i -= len(m.ContainerID)
+ copy(dAtA[i:], m.ContainerID)
+ i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *TaskDelete) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -847,50 +868,58 @@ func (m *TaskDelete) Marshal() (dAtA []byte, err error) {
}
func (m *TaskDelete) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TaskDelete) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ContainerID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
- i += copy(dAtA[i:], m.ContainerID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- if m.Pid != 0 {
- dAtA[i] = 0x10
- i++
- i = encodeVarintTask(dAtA, i, uint64(m.Pid))
+ if len(m.ID) > 0 {
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintTask(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0x2a
}
- if m.ExitStatus != 0 {
- dAtA[i] = 0x18
- i++
- i = encodeVarintTask(dAtA, i, uint64(m.ExitStatus))
+ n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt):])
+ if err2 != nil {
+ return 0, err2
}
+ i -= n2
+ i = encodeVarintTask(dAtA, i, uint64(n2))
+ i--
dAtA[i] = 0x22
- i++
- i = encodeVarintTask(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)))
- n2, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
- if err != nil {
- return 0, err
+ if m.ExitStatus != 0 {
+ i = encodeVarintTask(dAtA, i, uint64(m.ExitStatus))
+ i--
+ dAtA[i] = 0x18
}
- i += n2
- if len(m.ID) > 0 {
- dAtA[i] = 0x2a
- i++
- i = encodeVarintTask(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
+ if m.Pid != 0 {
+ i = encodeVarintTask(dAtA, i, uint64(m.Pid))
+ i--
+ dAtA[i] = 0x10
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ContainerID) > 0 {
+ i -= len(m.ContainerID)
+ copy(dAtA[i:], m.ContainerID)
+ i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *TaskIO) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -898,48 +927,57 @@ func (m *TaskIO) Marshal() (dAtA []byte, err error) {
}
func (m *TaskIO) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TaskIO) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Stdin) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintTask(dAtA, i, uint64(len(m.Stdin)))
- i += copy(dAtA[i:], m.Stdin)
- }
- if len(m.Stdout) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintTask(dAtA, i, uint64(len(m.Stdout)))
- i += copy(dAtA[i:], m.Stdout)
- }
- if len(m.Stderr) > 0 {
- dAtA[i] = 0x1a
- i++
- i = encodeVarintTask(dAtA, i, uint64(len(m.Stderr)))
- i += copy(dAtA[i:], m.Stderr)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if m.Terminal {
- dAtA[i] = 0x20
- i++
+ i--
if m.Terminal {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
+ i--
+ dAtA[i] = 0x20
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Stderr) > 0 {
+ i -= len(m.Stderr)
+ copy(dAtA[i:], m.Stderr)
+ i = encodeVarintTask(dAtA, i, uint64(len(m.Stderr)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Stdout) > 0 {
+ i -= len(m.Stdout)
+ copy(dAtA[i:], m.Stdout)
+ i = encodeVarintTask(dAtA, i, uint64(len(m.Stdout)))
+ i--
+ dAtA[i] = 0x12
}
- return i, nil
+ if len(m.Stdin) > 0 {
+ i -= len(m.Stdin)
+ copy(dAtA[i:], m.Stdin)
+ i = encodeVarintTask(dAtA, i, uint64(len(m.Stdin)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
}
func (m *TaskExit) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -947,50 +985,58 @@ func (m *TaskExit) Marshal() (dAtA []byte, err error) {
}
func (m *TaskExit) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TaskExit) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ContainerID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
- i += copy(dAtA[i:], m.ContainerID)
- }
- if len(m.ID) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintTask(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- if m.Pid != 0 {
- dAtA[i] = 0x18
- i++
- i = encodeVarintTask(dAtA, i, uint64(m.Pid))
+ n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt):])
+ if err3 != nil {
+ return 0, err3
}
+ i -= n3
+ i = encodeVarintTask(dAtA, i, uint64(n3))
+ i--
+ dAtA[i] = 0x2a
if m.ExitStatus != 0 {
- dAtA[i] = 0x20
- i++
i = encodeVarintTask(dAtA, i, uint64(m.ExitStatus))
+ i--
+ dAtA[i] = 0x20
}
- dAtA[i] = 0x2a
- i++
- i = encodeVarintTask(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)))
- n3, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
- if err != nil {
- return 0, err
+ if m.Pid != 0 {
+ i = encodeVarintTask(dAtA, i, uint64(m.Pid))
+ i--
+ dAtA[i] = 0x18
}
- i += n3
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ID) > 0 {
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintTask(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ContainerID) > 0 {
+ i -= len(m.ContainerID)
+ copy(dAtA[i:], m.ContainerID)
+ i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *TaskOOM) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -998,26 +1044,33 @@ func (m *TaskOOM) Marshal() (dAtA []byte, err error) {
}
func (m *TaskOOM) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TaskOOM) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.ContainerID) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.ContainerID)
+ copy(dAtA[i:], m.ContainerID)
i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
- i += copy(dAtA[i:], m.ContainerID)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *TaskExecAdded) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1025,32 +1078,40 @@ func (m *TaskExecAdded) Marshal() (dAtA []byte, err error) {
}
func (m *TaskExecAdded) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TaskExecAdded) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ContainerID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
- i += copy(dAtA[i:], m.ContainerID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.ExecID) > 0 {
- dAtA[i] = 0x12
- i++
+ i -= len(m.ExecID)
+ copy(dAtA[i:], m.ExecID)
i = encodeVarintTask(dAtA, i, uint64(len(m.ExecID)))
- i += copy(dAtA[i:], m.ExecID)
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ContainerID) > 0 {
+ i -= len(m.ContainerID)
+ copy(dAtA[i:], m.ContainerID)
+ i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *TaskExecStarted) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1058,37 +1119,45 @@ func (m *TaskExecStarted) Marshal() (dAtA []byte, err error) {
}
func (m *TaskExecStarted) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TaskExecStarted) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ContainerID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
- i += copy(dAtA[i:], m.ContainerID)
- }
- if len(m.ExecID) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintTask(dAtA, i, uint64(len(m.ExecID)))
- i += copy(dAtA[i:], m.ExecID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if m.Pid != 0 {
- dAtA[i] = 0x18
- i++
i = encodeVarintTask(dAtA, i, uint64(m.Pid))
+ i--
+ dAtA[i] = 0x18
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ExecID) > 0 {
+ i -= len(m.ExecID)
+ copy(dAtA[i:], m.ExecID)
+ i = encodeVarintTask(dAtA, i, uint64(len(m.ExecID)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ContainerID) > 0 {
+ i -= len(m.ContainerID)
+ copy(dAtA[i:], m.ContainerID)
+ i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *TaskPaused) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1096,26 +1165,33 @@ func (m *TaskPaused) Marshal() (dAtA []byte, err error) {
}
func (m *TaskPaused) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TaskPaused) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.ContainerID) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.ContainerID)
+ copy(dAtA[i:], m.ContainerID)
i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
- i += copy(dAtA[i:], m.ContainerID)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *TaskResumed) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1123,26 +1199,33 @@ func (m *TaskResumed) Marshal() (dAtA []byte, err error) {
}
func (m *TaskResumed) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TaskResumed) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.ContainerID) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.ContainerID)
+ copy(dAtA[i:], m.ContainerID)
i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
- i += copy(dAtA[i:], m.ContainerID)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *TaskCheckpointed) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1150,36 +1233,46 @@ func (m *TaskCheckpointed) Marshal() (dAtA []byte, err error) {
}
func (m *TaskCheckpointed) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TaskCheckpointed) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ContainerID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
- i += copy(dAtA[i:], m.ContainerID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Checkpoint) > 0 {
- dAtA[i] = 0x12
- i++
+ i -= len(m.Checkpoint)
+ copy(dAtA[i:], m.Checkpoint)
i = encodeVarintTask(dAtA, i, uint64(len(m.Checkpoint)))
- i += copy(dAtA[i:], m.Checkpoint)
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ContainerID) > 0 {
+ i -= len(m.ContainerID)
+ copy(dAtA[i:], m.ContainerID)
+ i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func encodeVarintTask(dAtA []byte, offset int, v uint64) int {
+ offset -= sovTask(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func (m *TaskCreate) Size() (n int) {
if m == nil {
@@ -1432,14 +1525,7 @@ func (m *TaskCheckpointed) Size() (n int) {
}
func sovTask(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozTask(x uint64) (n int) {
return sovTask(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -1448,11 +1534,16 @@ func (this *TaskCreate) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForRootfs := "[]*Mount{"
+ for _, f := range this.Rootfs {
+ repeatedStringForRootfs += strings.Replace(fmt.Sprintf("%v", f), "Mount", "types.Mount", 1) + ","
+ }
+ repeatedStringForRootfs += "}"
s := strings.Join([]string{`&TaskCreate{`,
`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
`Bundle:` + fmt.Sprintf("%v", this.Bundle) + `,`,
- `Rootfs:` + strings.Replace(fmt.Sprintf("%v", this.Rootfs), "Mount", "types.Mount", 1) + `,`,
- `IO:` + strings.Replace(fmt.Sprintf("%v", this.IO), "TaskIO", "TaskIO", 1) + `,`,
+ `Rootfs:` + repeatedStringForRootfs + `,`,
+ `IO:` + strings.Replace(this.IO.String(), "TaskIO", "TaskIO", 1) + `,`,
`Checkpoint:` + fmt.Sprintf("%v", this.Checkpoint) + `,`,
`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
@@ -1480,7 +1571,7 @@ func (this *TaskDelete) String() string {
`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
- `ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
+ `ExitedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExitedAt), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
@@ -1510,7 +1601,7 @@ func (this *TaskExit) String() string {
`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
- `ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
+ `ExitedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExitedAt), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -3120,6 +3211,7 @@ func (m *TaskCheckpointed) Unmarshal(dAtA []byte) error {
func skipTask(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -3151,10 +3243,8 @@ func skipTask(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -3175,55 +3265,30 @@ func skipTask(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthTask
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthTask
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowTask
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipTask(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthTask
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupTask
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthTask
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthTask = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowTask = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthTask = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowTask = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupTask = fmt.Errorf("proto: unexpected end of group")
)
diff --git a/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go b/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go
index ce3cef07d..d951b2683 100644
--- a/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go
@@ -11,8 +11,11 @@ import (
github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
types "github.com/gogo/protobuf/types"
grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
time "time"
@@ -28,7 +31,7 @@ var _ = time.Kitchen
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type Container struct {
// ID is the user-specified identifier.
@@ -96,7 +99,7 @@ func (m *Container) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Container.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -138,7 +141,7 @@ func (m *Container_Runtime) XXX_Marshal(b []byte, deterministic bool) ([]byte, e
return xxx_messageInfo_Container_Runtime.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -177,7 +180,7 @@ func (m *GetContainerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte,
return xxx_messageInfo_GetContainerRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -216,7 +219,7 @@ func (m *GetContainerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte
return xxx_messageInfo_GetContainerResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -265,7 +268,7 @@ func (m *ListContainersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byt
return xxx_messageInfo_ListContainersRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -304,7 +307,7 @@ func (m *ListContainersResponse) XXX_Marshal(b []byte, deterministic bool) ([]by
return xxx_messageInfo_ListContainersResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -343,7 +346,7 @@ func (m *CreateContainerRequest) XXX_Marshal(b []byte, deterministic bool) ([]by
return xxx_messageInfo_CreateContainerRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -382,7 +385,7 @@ func (m *CreateContainerResponse) XXX_Marshal(b []byte, deterministic bool) ([]b
return xxx_messageInfo_CreateContainerResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -432,7 +435,7 @@ func (m *UpdateContainerRequest) XXX_Marshal(b []byte, deterministic bool) ([]by
return xxx_messageInfo_UpdateContainerRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -471,7 +474,7 @@ func (m *UpdateContainerResponse) XXX_Marshal(b []byte, deterministic bool) ([]b
return xxx_messageInfo_UpdateContainerResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -510,7 +513,7 @@ func (m *DeleteContainerRequest) XXX_Marshal(b []byte, deterministic bool) ([]by
return xxx_messageInfo_DeleteContainerRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -549,7 +552,7 @@ func (m *ListContainerMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte
return xxx_messageInfo_ListContainerMessage.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -760,6 +763,29 @@ type ContainersServer interface {
Delete(context.Context, *DeleteContainerRequest) (*types.Empty, error)
}
+// UnimplementedContainersServer can be embedded to have forward compatible implementations.
+type UnimplementedContainersServer struct {
+}
+
+func (*UnimplementedContainersServer) Get(ctx context.Context, req *GetContainerRequest) (*GetContainerResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Get not implemented")
+}
+func (*UnimplementedContainersServer) List(ctx context.Context, req *ListContainersRequest) (*ListContainersResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method List not implemented")
+}
+func (*UnimplementedContainersServer) ListStream(req *ListContainersRequest, srv Containers_ListStreamServer) error {
+ return status.Errorf(codes.Unimplemented, "method ListStream not implemented")
+}
+func (*UnimplementedContainersServer) Create(ctx context.Context, req *CreateContainerRequest) (*CreateContainerResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Create not implemented")
+}
+func (*UnimplementedContainersServer) Update(ctx context.Context, req *UpdateContainerRequest) (*UpdateContainerResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Update not implemented")
+}
+func (*UnimplementedContainersServer) Delete(ctx context.Context, req *DeleteContainerRequest) (*types.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented")
+}
+
func RegisterContainersServer(s *grpc.Server, srv ContainersServer) {
s.RegisterService(&_Containers_serviceDesc, srv)
}
@@ -913,7 +939,7 @@ var _Containers_serviceDesc = grpc.ServiceDesc{
func (m *Container) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -921,123 +947,137 @@ func (m *Container) Marshal() (dAtA []byte, err error) {
}
func (m *Container) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Container) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintContainers(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- if len(m.Labels) > 0 {
- for k, _ := range m.Labels {
+ if len(m.Extensions) > 0 {
+ for k := range m.Extensions {
+ v := m.Extensions[k]
+ baseI := i
+ {
+ size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintContainers(dAtA, i, uint64(size))
+ }
+ i--
dAtA[i] = 0x12
- i++
- v := m.Labels[k]
- mapSize := 1 + len(k) + sovContainers(uint64(len(k))) + 1 + len(v) + sovContainers(uint64(len(v)))
- i = encodeVarintContainers(dAtA, i, uint64(mapSize))
- dAtA[i] = 0xa
- i++
+ i -= len(k)
+ copy(dAtA[i:], k)
i = encodeVarintContainers(dAtA, i, uint64(len(k)))
- i += copy(dAtA[i:], k)
- dAtA[i] = 0x12
- i++
- i = encodeVarintContainers(dAtA, i, uint64(len(v)))
- i += copy(dAtA[i:], v)
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintContainers(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x52
}
}
- if len(m.Image) > 0 {
- dAtA[i] = 0x1a
- i++
- i = encodeVarintContainers(dAtA, i, uint64(len(m.Image)))
- i += copy(dAtA[i:], m.Image)
+ n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt):])
+ if err2 != nil {
+ return 0, err2
}
- if m.Runtime != nil {
- dAtA[i] = 0x22
- i++
- i = encodeVarintContainers(dAtA, i, uint64(m.Runtime.Size()))
- n1, err := m.Runtime.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n1
+ i -= n2
+ i = encodeVarintContainers(dAtA, i, uint64(n2))
+ i--
+ dAtA[i] = 0x4a
+ n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt):])
+ if err3 != nil {
+ return 0, err3
}
- if m.Spec != nil {
- dAtA[i] = 0x2a
- i++
- i = encodeVarintContainers(dAtA, i, uint64(m.Spec.Size()))
- n2, err := m.Spec.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n2
+ i -= n3
+ i = encodeVarintContainers(dAtA, i, uint64(n3))
+ i--
+ dAtA[i] = 0x42
+ if len(m.SnapshotKey) > 0 {
+ i -= len(m.SnapshotKey)
+ copy(dAtA[i:], m.SnapshotKey)
+ i = encodeVarintContainers(dAtA, i, uint64(len(m.SnapshotKey)))
+ i--
+ dAtA[i] = 0x3a
}
if len(m.Snapshotter) > 0 {
- dAtA[i] = 0x32
- i++
+ i -= len(m.Snapshotter)
+ copy(dAtA[i:], m.Snapshotter)
i = encodeVarintContainers(dAtA, i, uint64(len(m.Snapshotter)))
- i += copy(dAtA[i:], m.Snapshotter)
+ i--
+ dAtA[i] = 0x32
}
- if len(m.SnapshotKey) > 0 {
- dAtA[i] = 0x3a
- i++
- i = encodeVarintContainers(dAtA, i, uint64(len(m.SnapshotKey)))
- i += copy(dAtA[i:], m.SnapshotKey)
+ if m.Spec != nil {
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintContainers(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
}
- dAtA[i] = 0x42
- i++
- i = encodeVarintContainers(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt)))
- n3, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i:])
- if err != nil {
- return 0, err
+ if m.Runtime != nil {
+ {
+ size, err := m.Runtime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintContainers(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
}
- i += n3
- dAtA[i] = 0x4a
- i++
- i = encodeVarintContainers(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)))
- n4, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i:])
- if err != nil {
- return 0, err
+ if len(m.Image) > 0 {
+ i -= len(m.Image)
+ copy(dAtA[i:], m.Image)
+ i = encodeVarintContainers(dAtA, i, uint64(len(m.Image)))
+ i--
+ dAtA[i] = 0x1a
}
- i += n4
- if len(m.Extensions) > 0 {
- for k, _ := range m.Extensions {
- dAtA[i] = 0x52
- i++
- v := m.Extensions[k]
- msgSize := 0
- if (&v) != nil {
- msgSize = (&v).Size()
- msgSize += 1 + sovContainers(uint64(msgSize))
- }
- mapSize := 1 + len(k) + sovContainers(uint64(len(k))) + msgSize
- i = encodeVarintContainers(dAtA, i, uint64(mapSize))
- dAtA[i] = 0xa
- i++
+ if len(m.Labels) > 0 {
+ for k := range m.Labels {
+ v := m.Labels[k]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintContainers(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(k)
+ copy(dAtA[i:], k)
i = encodeVarintContainers(dAtA, i, uint64(len(k)))
- i += copy(dAtA[i:], k)
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintContainers(dAtA, i, uint64(baseI-i))
+ i--
dAtA[i] = 0x12
- i++
- i = encodeVarintContainers(dAtA, i, uint64((&v).Size()))
- n5, err := (&v).MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n5
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ID) > 0 {
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintContainers(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *Container_Runtime) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1045,36 +1085,45 @@ func (m *Container_Runtime) Marshal() (dAtA []byte, err error) {
}
func (m *Container_Runtime) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Container_Runtime) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Name) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintContainers(dAtA, i, uint64(len(m.Name)))
- i += copy(dAtA[i:], m.Name)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if m.Options != nil {
- dAtA[i] = 0x12
- i++
- i = encodeVarintContainers(dAtA, i, uint64(m.Options.Size()))
- n6, err := m.Options.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ {
+ size, err := m.Options.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintContainers(dAtA, i, uint64(size))
}
- i += n6
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintContainers(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *GetContainerRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1082,26 +1131,33 @@ func (m *GetContainerRequest) Marshal() (dAtA []byte, err error) {
}
func (m *GetContainerRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GetContainerRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
i = encodeVarintContainers(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *GetContainerResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1109,28 +1165,36 @@ func (m *GetContainerResponse) Marshal() (dAtA []byte, err error) {
}
func (m *GetContainerResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GetContainerResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- dAtA[i] = 0xa
- i++
- i = encodeVarintContainers(dAtA, i, uint64(m.Container.Size()))
- n7, err := m.Container.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n7
if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ {
+ size, err := m.Container.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintContainers(dAtA, i, uint64(size))
}
- return i, nil
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
func (m *ListContainersRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1138,35 +1202,35 @@ func (m *ListContainersRequest) Marshal() (dAtA []byte, err error) {
}
func (m *ListContainersRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ListContainersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Filters) > 0 {
- for _, s := range m.Filters {
+ for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Filters[iNdEx])
+ copy(dAtA[i:], m.Filters[iNdEx])
+ i = encodeVarintContainers(dAtA, i, uint64(len(m.Filters[iNdEx])))
+ i--
dAtA[i] = 0xa
- i++
- l = len(s)
- for l >= 1<<7 {
- dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
- l >>= 7
- i++
- }
- dAtA[i] = uint8(l)
- i++
- i += copy(dAtA[i:], s)
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ListContainersResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1174,32 +1238,40 @@ func (m *ListContainersResponse) Marshal() (dAtA []byte, err error) {
}
func (m *ListContainersResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ListContainersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Containers) > 0 {
- for _, msg := range m.Containers {
- dAtA[i] = 0xa
- i++
- i = encodeVarintContainers(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ for iNdEx := len(m.Containers) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Containers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintContainers(dAtA, i, uint64(size))
}
- i += n
+ i--
+ dAtA[i] = 0xa
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *CreateContainerRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1207,28 +1279,36 @@ func (m *CreateContainerRequest) Marshal() (dAtA []byte, err error) {
}
func (m *CreateContainerRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CreateContainerRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- dAtA[i] = 0xa
- i++
- i = encodeVarintContainers(dAtA, i, uint64(m.Container.Size()))
- n8, err := m.Container.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n8
if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ {
+ size, err := m.Container.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintContainers(dAtA, i, uint64(size))
}
- return i, nil
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
func (m *CreateContainerResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1236,28 +1316,36 @@ func (m *CreateContainerResponse) Marshal() (dAtA []byte, err error) {
}
func (m *CreateContainerResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CreateContainerResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- dAtA[i] = 0xa
- i++
- i = encodeVarintContainers(dAtA, i, uint64(m.Container.Size()))
- n9, err := m.Container.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n9
if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ {
+ size, err := m.Container.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintContainers(dAtA, i, uint64(size))
}
- return i, nil
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
func (m *UpdateContainerRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1265,38 +1353,48 @@ func (m *UpdateContainerRequest) Marshal() (dAtA []byte, err error) {
}
func (m *UpdateContainerRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UpdateContainerRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- dAtA[i] = 0xa
- i++
- i = encodeVarintContainers(dAtA, i, uint64(m.Container.Size()))
- n10, err := m.Container.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- i += n10
if m.UpdateMask != nil {
+ {
+ size, err := m.UpdateMask.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintContainers(dAtA, i, uint64(size))
+ }
+ i--
dAtA[i] = 0x12
- i++
- i = encodeVarintContainers(dAtA, i, uint64(m.UpdateMask.Size()))
- n11, err := m.UpdateMask.MarshalTo(dAtA[i:])
+ }
+ {
+ size, err := m.Container.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
- i += n11
+ i -= size
+ i = encodeVarintContainers(dAtA, i, uint64(size))
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
func (m *UpdateContainerResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1304,28 +1402,36 @@ func (m *UpdateContainerResponse) Marshal() (dAtA []byte, err error) {
}
func (m *UpdateContainerResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UpdateContainerResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- dAtA[i] = 0xa
- i++
- i = encodeVarintContainers(dAtA, i, uint64(m.Container.Size()))
- n12, err := m.Container.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n12
if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ {
+ size, err := m.Container.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintContainers(dAtA, i, uint64(size))
}
- return i, nil
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
func (m *DeleteContainerRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1333,26 +1439,33 @@ func (m *DeleteContainerRequest) Marshal() (dAtA []byte, err error) {
}
func (m *DeleteContainerRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeleteContainerRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
i = encodeVarintContainers(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ListContainerMessage) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1360,34 +1473,44 @@ func (m *ListContainerMessage) Marshal() (dAtA []byte, err error) {
}
func (m *ListContainerMessage) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ListContainerMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if m.Container != nil {
- dAtA[i] = 0xa
- i++
- i = encodeVarintContainers(dAtA, i, uint64(m.Container.Size()))
- n13, err := m.Container.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ {
+ size, err := m.Container.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintContainers(dAtA, i, uint64(size))
}
- i += n13
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func encodeVarintContainers(dAtA []byte, offset int, v uint64) int {
+ offset -= sovContainers(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func (m *Container) Size() (n int) {
if m == nil {
@@ -1625,14 +1748,7 @@ func (m *ListContainerMessage) Size() (n int) {
}
func sovContainers(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozContainers(x uint64) (n int) {
return sovContainers(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -1669,8 +1785,8 @@ func (this *Container) String() string {
`Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "Any", "types.Any", 1) + `,`,
`Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`,
`SnapshotKey:` + fmt.Sprintf("%v", this.SnapshotKey) + `,`,
- `CreatedAt:` + strings.Replace(strings.Replace(this.CreatedAt.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
- `UpdatedAt:` + strings.Replace(strings.Replace(this.UpdatedAt.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+ `CreatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CreatedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+ `UpdatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UpdatedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
`Extensions:` + mapStringForExtensions + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
@@ -1726,8 +1842,13 @@ func (this *ListContainersResponse) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForContainers := "[]Container{"
+ for _, f := range this.Containers {
+ repeatedStringForContainers += strings.Replace(strings.Replace(f.String(), "Container", "Container", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForContainers += "}"
s := strings.Join([]string{`&ListContainersResponse{`,
- `Containers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Containers), "Container", "Container", 1), `&`, ``, 1) + `,`,
+ `Containers:` + repeatedStringForContainers + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -1794,7 +1915,7 @@ func (this *ListContainerMessage) String() string {
return "nil"
}
s := strings.Join([]string{`&ListContainerMessage{`,
- `Container:` + strings.Replace(fmt.Sprintf("%v", this.Container), "Container", "Container", 1) + `,`,
+ `Container:` + strings.Replace(this.Container.String(), "Container", "Container", 1) + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -3416,6 +3537,7 @@ func (m *ListContainerMessage) Unmarshal(dAtA []byte) error {
func skipContainers(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -3447,10 +3569,8 @@ func skipContainers(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -3471,55 +3591,30 @@ func skipContainers(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthContainers
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthContainers
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowContainers
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipContainers(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthContainers
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupContainers
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthContainers
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthContainers = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowContainers = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthContainers = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowContainers = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupContainers = fmt.Errorf("proto: unexpected end of group")
)
diff --git a/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go b/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go
index 3e4580024..1cf0aaa91 100644
--- a/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go
@@ -12,8 +12,11 @@ import (
types "github.com/gogo/protobuf/types"
github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
time "time"
@@ -29,7 +32,7 @@ var _ = time.Kitchen
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// WriteAction defines the behavior of a WriteRequest.
type WriteAction int32
@@ -106,7 +109,7 @@ func (m *Info) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Info.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -145,7 +148,7 @@ func (m *InfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_InfoRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -184,7 +187,7 @@ func (m *InfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_InfoResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -230,7 +233,7 @@ func (m *UpdateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error
return xxx_messageInfo_UpdateRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -269,7 +272,7 @@ func (m *UpdateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro
return xxx_messageInfo_UpdateResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -318,7 +321,7 @@ func (m *ListContentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte,
return xxx_messageInfo_ListContentRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -357,7 +360,7 @@ func (m *ListContentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte,
return xxx_messageInfo_ListContentResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -397,7 +400,7 @@ func (m *DeleteContentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte
return xxx_messageInfo_DeleteContentRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -446,7 +449,7 @@ func (m *ReadContentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte,
return xxx_messageInfo_ReadContentRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -487,7 +490,7 @@ func (m *ReadContentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte,
return xxx_messageInfo_ReadContentResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -531,7 +534,7 @@ func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Status.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -570,7 +573,7 @@ func (m *StatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error
return xxx_messageInfo_StatusRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -609,7 +612,7 @@ func (m *StatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro
return xxx_messageInfo_StatusResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -648,7 +651,7 @@ func (m *ListStatusesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte,
return xxx_messageInfo_ListStatusesRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -687,7 +690,7 @@ func (m *ListStatusesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte
return xxx_messageInfo_ListStatusesResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -779,7 +782,7 @@ func (m *WriteContentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte,
return xxx_messageInfo_WriteContentRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -844,7 +847,7 @@ func (m *WriteContentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte
return xxx_messageInfo_WriteContentResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -883,7 +886,7 @@ func (m *AbortRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_AbortRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -1283,6 +1286,38 @@ type ContentServer interface {
Abort(context.Context, *AbortRequest) (*types.Empty, error)
}
+// UnimplementedContentServer can be embedded to have forward compatible implementations.
+type UnimplementedContentServer struct {
+}
+
+func (*UnimplementedContentServer) Info(ctx context.Context, req *InfoRequest) (*InfoResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Info not implemented")
+}
+func (*UnimplementedContentServer) Update(ctx context.Context, req *UpdateRequest) (*UpdateResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Update not implemented")
+}
+func (*UnimplementedContentServer) List(req *ListContentRequest, srv Content_ListServer) error {
+ return status.Errorf(codes.Unimplemented, "method List not implemented")
+}
+func (*UnimplementedContentServer) Delete(ctx context.Context, req *DeleteContentRequest) (*types.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented")
+}
+func (*UnimplementedContentServer) Read(req *ReadContentRequest, srv Content_ReadServer) error {
+ return status.Errorf(codes.Unimplemented, "method Read not implemented")
+}
+func (*UnimplementedContentServer) Status(ctx context.Context, req *StatusRequest) (*StatusResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Status not implemented")
+}
+func (*UnimplementedContentServer) ListStatuses(ctx context.Context, req *ListStatusesRequest) (*ListStatusesResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListStatuses not implemented")
+}
+func (*UnimplementedContentServer) Write(srv Content_WriteServer) error {
+ return status.Errorf(codes.Unimplemented, "method Write not implemented")
+}
+func (*UnimplementedContentServer) Abort(ctx context.Context, req *AbortRequest) (*types.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Abort not implemented")
+}
+
func RegisterContentServer(s *grpc.Server, srv ContentServer) {
s.RegisterService(&_Content_serviceDesc, srv)
}
@@ -1516,7 +1551,7 @@ var _Content_serviceDesc = grpc.ServiceDesc{
func (m *Info) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1524,64 +1559,73 @@ func (m *Info) Marshal() (dAtA []byte, err error) {
}
func (m *Info) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Info) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Digest) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintContent(dAtA, i, uint64(len(m.Digest)))
- i += copy(dAtA[i:], m.Digest)
- }
- if m.Size_ != 0 {
- dAtA[i] = 0x10
- i++
- i = encodeVarintContent(dAtA, i, uint64(m.Size_))
- }
- dAtA[i] = 0x1a
- i++
- i = encodeVarintContent(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt)))
- n1, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n1
- dAtA[i] = 0x22
- i++
- i = encodeVarintContent(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)))
- n2, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i:])
- if err != nil {
- return 0, err
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- i += n2
if len(m.Labels) > 0 {
- for k, _ := range m.Labels {
- dAtA[i] = 0x2a
- i++
+ for k := range m.Labels {
v := m.Labels[k]
- mapSize := 1 + len(k) + sovContent(uint64(len(k))) + 1 + len(v) + sovContent(uint64(len(v)))
- i = encodeVarintContent(dAtA, i, uint64(mapSize))
- dAtA[i] = 0xa
- i++
- i = encodeVarintContent(dAtA, i, uint64(len(k)))
- i += copy(dAtA[i:], k)
- dAtA[i] = 0x12
- i++
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
i = encodeVarintContent(dAtA, i, uint64(len(v)))
- i += copy(dAtA[i:], v)
+ i--
+ dAtA[i] = 0x12
+ i -= len(k)
+ copy(dAtA[i:], k)
+ i = encodeVarintContent(dAtA, i, uint64(len(k)))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintContent(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x2a
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt):])
+ if err1 != nil {
+ return 0, err1
+ }
+ i -= n1
+ i = encodeVarintContent(dAtA, i, uint64(n1))
+ i--
+ dAtA[i] = 0x22
+ n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt):])
+ if err2 != nil {
+ return 0, err2
}
- return i, nil
+ i -= n2
+ i = encodeVarintContent(dAtA, i, uint64(n2))
+ i--
+ dAtA[i] = 0x1a
+ if m.Size_ != 0 {
+ i = encodeVarintContent(dAtA, i, uint64(m.Size_))
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.Digest) > 0 {
+ i -= len(m.Digest)
+ copy(dAtA[i:], m.Digest)
+ i = encodeVarintContent(dAtA, i, uint64(len(m.Digest)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
}
func (m *InfoRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1589,26 +1633,33 @@ func (m *InfoRequest) Marshal() (dAtA []byte, err error) {
}
func (m *InfoRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *InfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Digest) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.Digest)
+ copy(dAtA[i:], m.Digest)
i = encodeVarintContent(dAtA, i, uint64(len(m.Digest)))
- i += copy(dAtA[i:], m.Digest)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *InfoResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1616,28 +1667,36 @@ func (m *InfoResponse) Marshal() (dAtA []byte, err error) {
}
func (m *InfoResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *InfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- dAtA[i] = 0xa
- i++
- i = encodeVarintContent(dAtA, i, uint64(m.Info.Size()))
- n3, err := m.Info.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n3
if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- return i, nil
+ {
+ size, err := m.Info.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintContent(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
func (m *UpdateRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1645,38 +1704,48 @@ func (m *UpdateRequest) Marshal() (dAtA []byte, err error) {
}
func (m *UpdateRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UpdateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- dAtA[i] = 0xa
- i++
- i = encodeVarintContent(dAtA, i, uint64(m.Info.Size()))
- n4, err := m.Info.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- i += n4
if m.UpdateMask != nil {
+ {
+ size, err := m.UpdateMask.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintContent(dAtA, i, uint64(size))
+ }
+ i--
dAtA[i] = 0x12
- i++
- i = encodeVarintContent(dAtA, i, uint64(m.UpdateMask.Size()))
- n5, err := m.UpdateMask.MarshalTo(dAtA[i:])
+ }
+ {
+ size, err := m.Info.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
- i += n5
+ i -= size
+ i = encodeVarintContent(dAtA, i, uint64(size))
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
func (m *UpdateResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1684,28 +1753,36 @@ func (m *UpdateResponse) Marshal() (dAtA []byte, err error) {
}
func (m *UpdateResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UpdateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- dAtA[i] = 0xa
- i++
- i = encodeVarintContent(dAtA, i, uint64(m.Info.Size()))
- n6, err := m.Info.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n6
if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- return i, nil
+ {
+ size, err := m.Info.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintContent(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
func (m *ListContentRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1713,35 +1790,35 @@ func (m *ListContentRequest) Marshal() (dAtA []byte, err error) {
}
func (m *ListContentRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ListContentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Filters) > 0 {
- for _, s := range m.Filters {
+ for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Filters[iNdEx])
+ copy(dAtA[i:], m.Filters[iNdEx])
+ i = encodeVarintContent(dAtA, i, uint64(len(m.Filters[iNdEx])))
+ i--
dAtA[i] = 0xa
- i++
- l = len(s)
- for l >= 1<<7 {
- dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
- l >>= 7
- i++
- }
- dAtA[i] = uint8(l)
- i++
- i += copy(dAtA[i:], s)
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ListContentResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1749,32 +1826,40 @@ func (m *ListContentResponse) Marshal() (dAtA []byte, err error) {
}
func (m *ListContentResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ListContentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Info) > 0 {
- for _, msg := range m.Info {
- dAtA[i] = 0xa
- i++
- i = encodeVarintContent(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ for iNdEx := len(m.Info) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Info[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintContent(dAtA, i, uint64(size))
}
- i += n
+ i--
+ dAtA[i] = 0xa
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *DeleteContentRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1782,26 +1867,33 @@ func (m *DeleteContentRequest) Marshal() (dAtA []byte, err error) {
}
func (m *DeleteContentRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeleteContentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Digest) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.Digest)
+ copy(dAtA[i:], m.Digest)
i = encodeVarintContent(dAtA, i, uint64(len(m.Digest)))
- i += copy(dAtA[i:], m.Digest)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ReadContentRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1809,36 +1901,43 @@ func (m *ReadContentRequest) Marshal() (dAtA []byte, err error) {
}
func (m *ReadContentRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ReadContentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Digest) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintContent(dAtA, i, uint64(len(m.Digest)))
- i += copy(dAtA[i:], m.Digest)
- }
- if m.Offset != 0 {
- dAtA[i] = 0x10
- i++
- i = encodeVarintContent(dAtA, i, uint64(m.Offset))
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if m.Size_ != 0 {
- dAtA[i] = 0x18
- i++
i = encodeVarintContent(dAtA, i, uint64(m.Size_))
+ i--
+ dAtA[i] = 0x18
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if m.Offset != 0 {
+ i = encodeVarintContent(dAtA, i, uint64(m.Offset))
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.Digest) > 0 {
+ i -= len(m.Digest)
+ copy(dAtA[i:], m.Digest)
+ i = encodeVarintContent(dAtA, i, uint64(len(m.Digest)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ReadContentResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1846,31 +1945,38 @@ func (m *ReadContentResponse) Marshal() (dAtA []byte, err error) {
}
func (m *ReadContentResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ReadContentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if m.Offset != 0 {
- dAtA[i] = 0x8
- i++
- i = encodeVarintContent(dAtA, i, uint64(m.Offset))
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Data) > 0 {
- dAtA[i] = 0x12
- i++
+ i -= len(m.Data)
+ copy(dAtA[i:], m.Data)
i = encodeVarintContent(dAtA, i, uint64(len(m.Data)))
- i += copy(dAtA[i:], m.Data)
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if m.Offset != 0 {
+ i = encodeVarintContent(dAtA, i, uint64(m.Offset))
+ i--
+ dAtA[i] = 0x8
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *Status) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1878,58 +1984,66 @@ func (m *Status) Marshal() (dAtA []byte, err error) {
}
func (m *Status) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Status) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- dAtA[i] = 0xa
- i++
- i = encodeVarintContent(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.StartedAt)))
- n7, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartedAt, dAtA[i:])
- if err != nil {
- return 0, err
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- i += n7
- dAtA[i] = 0x12
- i++
- i = encodeVarintContent(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)))
- n8, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i:])
- if err != nil {
- return 0, err
+ if len(m.Expected) > 0 {
+ i -= len(m.Expected)
+ copy(dAtA[i:], m.Expected)
+ i = encodeVarintContent(dAtA, i, uint64(len(m.Expected)))
+ i--
+ dAtA[i] = 0x32
}
- i += n8
- if len(m.Ref) > 0 {
- dAtA[i] = 0x1a
- i++
- i = encodeVarintContent(dAtA, i, uint64(len(m.Ref)))
- i += copy(dAtA[i:], m.Ref)
+ if m.Total != 0 {
+ i = encodeVarintContent(dAtA, i, uint64(m.Total))
+ i--
+ dAtA[i] = 0x28
}
if m.Offset != 0 {
- dAtA[i] = 0x20
- i++
i = encodeVarintContent(dAtA, i, uint64(m.Offset))
+ i--
+ dAtA[i] = 0x20
}
- if m.Total != 0 {
- dAtA[i] = 0x28
- i++
- i = encodeVarintContent(dAtA, i, uint64(m.Total))
+ if len(m.Ref) > 0 {
+ i -= len(m.Ref)
+ copy(dAtA[i:], m.Ref)
+ i = encodeVarintContent(dAtA, i, uint64(len(m.Ref)))
+ i--
+ dAtA[i] = 0x1a
}
- if len(m.Expected) > 0 {
- dAtA[i] = 0x32
- i++
- i = encodeVarintContent(dAtA, i, uint64(len(m.Expected)))
- i += copy(dAtA[i:], m.Expected)
+ n7, err7 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt):])
+ if err7 != nil {
+ return 0, err7
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i -= n7
+ i = encodeVarintContent(dAtA, i, uint64(n7))
+ i--
+ dAtA[i] = 0x12
+ n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.StartedAt):])
+ if err8 != nil {
+ return 0, err8
}
- return i, nil
+ i -= n8
+ i = encodeVarintContent(dAtA, i, uint64(n8))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
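
The regenerated marshalers fill the buffer back to front: `i` starts at `len(dAtA)`, each field writes its payload first, then its length varint, then the tag byte, and fields are emitted in descending field-number order so the finished buffer still reads in ascending order. Timestamps follow the same pattern, with StdTimeMarshalTo pointed at a slice that ends exactly where the encoded time must end. A minimal sketch of the idea, under the assumption that the msg type and helpers below are made up (the generated files use encodeVarintContent/sovContent and their siblings instead):

    // Sketch of the reverse ("sized buffer") marshal pattern used by the
    // regenerated code. The msg type and helpers are hypothetical.
    package main

    import (
        "fmt"
        "math/bits"
    )

    func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

    // putVarint writes v so that it ends at offset and returns the new start.
    func putVarint(b []byte, offset int, v uint64) int {
        offset -= sov(v)
        base := offset
        for v >= 1<<7 {
            b[offset] = uint8(v&0x7f | 0x80)
            v >>= 7
            offset++
        }
        b[offset] = uint8(v)
        return base
    }

    type msg struct{ Ref string }

    func (m *msg) size() int { return 1 + sov(uint64(len(m.Ref))) + len(m.Ref) }

    // marshalToSizedBuffer fills dAtA from the end toward the start.
    func (m *msg) marshalToSizedBuffer(dAtA []byte) (int, error) {
        i := len(dAtA)
        if len(m.Ref) > 0 {
            i -= len(m.Ref)
            copy(dAtA[i:], m.Ref)                      // payload first
            i = putVarint(dAtA, i, uint64(len(m.Ref))) // then its length
            i--
            dAtA[i] = 0xa // finally the tag (field 1, wire type 2)
        }
        return len(dAtA) - i, nil
    }

    func main() {
        m := &msg{Ref: "abc"}
        buf := make([]byte, m.size())
        n, _ := m.marshalToSizedBuffer(buf)
        fmt.Printf("%x n=%d\n", buf, n) // 0a03616263 n=5
    }
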
func (m *StatusRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1937,26 +2051,33 @@ func (m *StatusRequest) Marshal() (dAtA []byte, err error) {
}
func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Ref) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.Ref)
+ copy(dAtA[i:], m.Ref)
i = encodeVarintContent(dAtA, i, uint64(len(m.Ref)))
- i += copy(dAtA[i:], m.Ref)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *StatusResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1964,30 +2085,38 @@ func (m *StatusResponse) Marshal() (dAtA []byte, err error) {
}
func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if m.Status != nil {
- dAtA[i] = 0xa
- i++
- i = encodeVarintContent(dAtA, i, uint64(m.Status.Size()))
- n9, err := m.Status.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintContent(dAtA, i, uint64(size))
}
- i += n9
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ListStatusesRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1995,35 +2124,35 @@ func (m *ListStatusesRequest) Marshal() (dAtA []byte, err error) {
}
func (m *ListStatusesRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ListStatusesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Filters) > 0 {
- for _, s := range m.Filters {
+ for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Filters[iNdEx])
+ copy(dAtA[i:], m.Filters[iNdEx])
+ i = encodeVarintContent(dAtA, i, uint64(len(m.Filters[iNdEx])))
+ i--
dAtA[i] = 0xa
- i++
- l = len(s)
- for l >= 1<<7 {
- dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
- l >>= 7
- i++
- }
- dAtA[i] = uint8(l)
- i++
- i += copy(dAtA[i:], s)
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ListStatusesResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2031,32 +2160,40 @@ func (m *ListStatusesResponse) Marshal() (dAtA []byte, err error) {
}
func (m *ListStatusesResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ListStatusesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Statuses) > 0 {
- for _, msg := range m.Statuses {
- dAtA[i] = 0xa
- i++
- i = encodeVarintContent(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ for iNdEx := len(m.Statuses) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Statuses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintContent(dAtA, i, uint64(size))
}
- i += n
+ i--
+ dAtA[i] = 0xa
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *WriteContentRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2064,70 +2201,81 @@ func (m *WriteContentRequest) Marshal() (dAtA []byte, err error) {
}
func (m *WriteContentRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WriteContentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if m.Action != 0 {
- dAtA[i] = 0x8
- i++
- i = encodeVarintContent(dAtA, i, uint64(m.Action))
- }
- if len(m.Ref) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintContent(dAtA, i, uint64(len(m.Ref)))
- i += copy(dAtA[i:], m.Ref)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- if m.Total != 0 {
- dAtA[i] = 0x18
- i++
- i = encodeVarintContent(dAtA, i, uint64(m.Total))
+ if len(m.Labels) > 0 {
+ for k := range m.Labels {
+ v := m.Labels[k]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintContent(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(k)
+ copy(dAtA[i:], k)
+ i = encodeVarintContent(dAtA, i, uint64(len(k)))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintContent(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x3a
+ }
}
- if len(m.Expected) > 0 {
- dAtA[i] = 0x22
- i++
- i = encodeVarintContent(dAtA, i, uint64(len(m.Expected)))
- i += copy(dAtA[i:], m.Expected)
+ if len(m.Data) > 0 {
+ i -= len(m.Data)
+ copy(dAtA[i:], m.Data)
+ i = encodeVarintContent(dAtA, i, uint64(len(m.Data)))
+ i--
+ dAtA[i] = 0x32
}
if m.Offset != 0 {
- dAtA[i] = 0x28
- i++
i = encodeVarintContent(dAtA, i, uint64(m.Offset))
+ i--
+ dAtA[i] = 0x28
}
- if len(m.Data) > 0 {
- dAtA[i] = 0x32
- i++
- i = encodeVarintContent(dAtA, i, uint64(len(m.Data)))
- i += copy(dAtA[i:], m.Data)
+ if len(m.Expected) > 0 {
+ i -= len(m.Expected)
+ copy(dAtA[i:], m.Expected)
+ i = encodeVarintContent(dAtA, i, uint64(len(m.Expected)))
+ i--
+ dAtA[i] = 0x22
}
- if len(m.Labels) > 0 {
- for k, _ := range m.Labels {
- dAtA[i] = 0x3a
- i++
- v := m.Labels[k]
- mapSize := 1 + len(k) + sovContent(uint64(len(k))) + 1 + len(v) + sovContent(uint64(len(v)))
- i = encodeVarintContent(dAtA, i, uint64(mapSize))
- dAtA[i] = 0xa
- i++
- i = encodeVarintContent(dAtA, i, uint64(len(k)))
- i += copy(dAtA[i:], k)
- dAtA[i] = 0x12
- i++
- i = encodeVarintContent(dAtA, i, uint64(len(v)))
- i += copy(dAtA[i:], v)
- }
+ if m.Total != 0 {
+ i = encodeVarintContent(dAtA, i, uint64(m.Total))
+ i--
+ dAtA[i] = 0x18
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Ref) > 0 {
+ i -= len(m.Ref)
+ copy(dAtA[i:], m.Ref)
+ i = encodeVarintContent(dAtA, i, uint64(len(m.Ref)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Action != 0 {
+ i = encodeVarintContent(dAtA, i, uint64(m.Action))
+ i--
+ dAtA[i] = 0x8
}
- return i, nil
+ return len(dAtA) - i, nil
}
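
Map fields (here WriteContentRequest.Labels, field 7) use the same reverse layout with one extra bookmark: `baseI` remembers where the entry ends, the value and key are written in front of it, and `baseI-i` then gives the embedded entry length without a separate sizing pass. A hedged, standalone sketch of that layout (the helper names are illustrative, not the generated ones):

    // Hypothetical sketch of the baseI pattern for a map<string,string> field:
    // value, key, entry length and entry tag are written in that (reverse) order.
    package main

    import (
        "fmt"
        "math/bits"
    )

    func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

    func putVarint(b []byte, off int, v uint64) int {
        off -= sov(v)
        base := off
        for v >= 1<<7 {
            b[off] = uint8(v&0x7f | 0x80)
            v >>= 7
            off++
        }
        b[off] = uint8(v)
        return base
    }

    func marshalLabel(b []byte, i int, k, v string) int {
        baseI := i // end of this map entry
        i -= len(v)
        copy(b[i:], v)
        i = putVarint(b, i, uint64(len(v)))
        i--
        b[i] = 0x12 // value: field 2, wire type 2
        i -= len(k)
        copy(b[i:], k)
        i = putVarint(b, i, uint64(len(k)))
        i--
        b[i] = 0xa // key: field 1, wire type 2
        i = putVarint(b, i, uint64(baseI-i)) // embedded entry length
        i--
        b[i] = 0x3a // map field 7, wire type 2 (as in WriteContentRequest.Labels)
        return i
    }

    func main() {
        k, v := "name", "layer"
        n := 2 + len(k) + 2 + len(v) + 2 // tags and lengths (all < 128) plus data
        buf := make([]byte, n)
        i := marshalLabel(buf, len(buf), k, v)
        fmt.Printf("%x\n", buf[i:]) // 3a0d0a046e616d6512056c61796572
    }
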
func (m *WriteContentResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2135,57 +2283,64 @@ func (m *WriteContentResponse) Marshal() (dAtA []byte, err error) {
}
func (m *WriteContentResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WriteContentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if m.Action != 0 {
- dAtA[i] = 0x8
- i++
- i = encodeVarintContent(dAtA, i, uint64(m.Action))
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- dAtA[i] = 0x12
- i++
- i = encodeVarintContent(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.StartedAt)))
- n10, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartedAt, dAtA[i:])
- if err != nil {
- return 0, err
+ if len(m.Digest) > 0 {
+ i -= len(m.Digest)
+ copy(dAtA[i:], m.Digest)
+ i = encodeVarintContent(dAtA, i, uint64(len(m.Digest)))
+ i--
+ dAtA[i] = 0x32
}
- i += n10
- dAtA[i] = 0x1a
- i++
- i = encodeVarintContent(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)))
- n11, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i:])
- if err != nil {
- return 0, err
+ if m.Total != 0 {
+ i = encodeVarintContent(dAtA, i, uint64(m.Total))
+ i--
+ dAtA[i] = 0x28
}
- i += n11
if m.Offset != 0 {
- dAtA[i] = 0x20
- i++
i = encodeVarintContent(dAtA, i, uint64(m.Offset))
+ i--
+ dAtA[i] = 0x20
}
- if m.Total != 0 {
- dAtA[i] = 0x28
- i++
- i = encodeVarintContent(dAtA, i, uint64(m.Total))
+ n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt):])
+ if err10 != nil {
+ return 0, err10
}
- if len(m.Digest) > 0 {
- dAtA[i] = 0x32
- i++
- i = encodeVarintContent(dAtA, i, uint64(len(m.Digest)))
- i += copy(dAtA[i:], m.Digest)
+ i -= n10
+ i = encodeVarintContent(dAtA, i, uint64(n10))
+ i--
+ dAtA[i] = 0x1a
+ n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.StartedAt):])
+ if err11 != nil {
+ return 0, err11
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i -= n11
+ i = encodeVarintContent(dAtA, i, uint64(n11))
+ i--
+ dAtA[i] = 0x12
+ if m.Action != 0 {
+ i = encodeVarintContent(dAtA, i, uint64(m.Action))
+ i--
+ dAtA[i] = 0x8
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *AbortRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2193,30 +2348,39 @@ func (m *AbortRequest) Marshal() (dAtA []byte, err error) {
}
func (m *AbortRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AbortRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Ref) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.Ref)
+ copy(dAtA[i:], m.Ref)
i = encodeVarintContent(dAtA, i, uint64(len(m.Ref)))
- i += copy(dAtA[i:], m.Ref)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func encodeVarintContent(dAtA []byte, offset int, v uint64) int {
+ offset -= sovContent(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func (m *Info) Size() (n int) {
if m == nil {
@@ -2589,14 +2753,7 @@ func (m *AbortRequest) Size() (n int) {
}
func sovContent(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
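
The varint size helper is now branch-free: `math_bits.Len64(x|1)` is the bit length of x (with zero treated as one bit), and `(bits+6)/7` rounds up to the number of 7-bit groups. A quick standalone check against the loop it replaces, using only the standard library:

    package main

    import (
        "fmt"
        "math/bits"
    )

    // New, branch-free form used by the regenerated code.
    func sovNew(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

    // Old loop form it replaces.
    func sovOld(x uint64) (n int) {
        for {
            n++
            x >>= 7
            if x == 0 {
                return n
            }
        }
    }

    func main() {
        for _, x := range []uint64{0, 1, 127, 128, 16383, 16384, 1<<63 - 1, 1 << 63} {
            fmt.Println(x, sovOld(x), sovNew(x)) // the two results always match
        }
    }
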
func sozContent(x uint64) (n int) {
return sovContent(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -2618,8 +2775,8 @@ func (this *Info) String() string {
s := strings.Join([]string{`&Info{`,
`Digest:` + fmt.Sprintf("%v", this.Digest) + `,`,
`Size_:` + fmt.Sprintf("%v", this.Size_) + `,`,
- `CreatedAt:` + strings.Replace(strings.Replace(this.CreatedAt.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
- `UpdatedAt:` + strings.Replace(strings.Replace(this.UpdatedAt.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+ `CreatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CreatedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+ `UpdatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UpdatedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
`Labels:` + mapStringForLabels + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
@@ -2686,8 +2843,13 @@ func (this *ListContentResponse) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForInfo := "[]Info{"
+ for _, f := range this.Info {
+ repeatedStringForInfo += strings.Replace(strings.Replace(f.String(), "Info", "Info", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForInfo += "}"
s := strings.Join([]string{`&ListContentResponse{`,
- `Info:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Info), "Info", "Info", 1), `&`, ``, 1) + `,`,
+ `Info:` + repeatedStringForInfo + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -2734,8 +2896,8 @@ func (this *Status) String() string {
return "nil"
}
s := strings.Join([]string{`&Status{`,
- `StartedAt:` + strings.Replace(strings.Replace(this.StartedAt.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
- `UpdatedAt:` + strings.Replace(strings.Replace(this.UpdatedAt.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+ `StartedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+ `UpdatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UpdatedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
`Ref:` + fmt.Sprintf("%v", this.Ref) + `,`,
`Offset:` + fmt.Sprintf("%v", this.Offset) + `,`,
`Total:` + fmt.Sprintf("%v", this.Total) + `,`,
@@ -2761,7 +2923,7 @@ func (this *StatusResponse) String() string {
return "nil"
}
s := strings.Join([]string{`&StatusResponse{`,
- `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "Status", "Status", 1) + `,`,
+ `Status:` + strings.Replace(this.Status.String(), "Status", "Status", 1) + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -2782,8 +2944,13 @@ func (this *ListStatusesResponse) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForStatuses := "[]Status{"
+ for _, f := range this.Statuses {
+ repeatedStringForStatuses += strings.Replace(strings.Replace(f.String(), "Status", "Status", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForStatuses += "}"
s := strings.Join([]string{`&ListStatusesResponse{`,
- `Statuses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Statuses), "Status", "Status", 1), `&`, ``, 1) + `,`,
+ `Statuses:` + repeatedStringForStatuses + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -2822,8 +2989,8 @@ func (this *WriteContentResponse) String() string {
}
s := strings.Join([]string{`&WriteContentResponse{`,
`Action:` + fmt.Sprintf("%v", this.Action) + `,`,
- `StartedAt:` + strings.Replace(strings.Replace(this.StartedAt.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
- `UpdatedAt:` + strings.Replace(strings.Replace(this.UpdatedAt.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+ `StartedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+ `UpdatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UpdatedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
`Offset:` + fmt.Sprintf("%v", this.Offset) + `,`,
`Total:` + fmt.Sprintf("%v", this.Total) + `,`,
`Digest:` + fmt.Sprintf("%v", this.Digest) + `,`,
@@ -5229,6 +5396,7 @@ func (m *AbortRequest) Unmarshal(dAtA []byte) error {
func skipContent(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -5260,10 +5428,8 @@ func skipContent(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -5284,55 +5450,30 @@ func skipContent(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthContent
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthContent
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowContent
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipContent(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthContent
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupContent
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthContent
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthContent = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowContent = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthContent = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowContent = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupContent = fmt.Errorf("proto: unexpected end of group")
)
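
skipContent, and the other regenerated skip functions below, no longer recurse into groups: a `depth` counter goes up on a start-group tag (wire type 3), down on end-group (wire type 4), and the function only returns once depth is back to zero, which is also what lets it surface the new ErrUnexpectedEndOfGroup* errors for unbalanced input. A simplified, hypothetical illustration of just the counter logic, separate from the actual wire parsing:

    package main

    import (
        "errors"
        "fmt"
    )

    var errUnexpectedEndOfGroup = errors.New("proto: unexpected end of group")

    // groupsBalanced walks a sequence of wire types and checks that start-group
    // and end-group tags nest properly, the way the single-loop skip functions
    // now do instead of recursing per group.
    func groupsBalanced(wireTypes []int) error {
        depth := 0
        for _, wt := range wireTypes {
            switch wt {
            case 3: // start group
                depth++
            case 4: // end group
                if depth == 0 {
                    return errUnexpectedEndOfGroup
                }
                depth--
            }
        }
        if depth != 0 {
            return errors.New("proto: unterminated group")
        }
        return nil
    }

    func main() {
        fmt.Println(groupsBalanced([]int{3, 0, 3, 2, 4, 4})) // <nil>
        fmt.Println(groupsBalanced([]int{4}))                // unexpected end of group
    }
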
diff --git a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go
index 6c7920004..48379234d 100644
--- a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go
@@ -11,8 +11,11 @@ import (
github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
types1 "github.com/gogo/protobuf/types"
grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
)
@@ -26,7 +29,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type ApplyRequest struct {
// Diff is the descriptor of the diff to be extracted
@@ -51,7 +54,7 @@ func (m *ApplyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_ApplyRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -93,7 +96,7 @@ func (m *ApplyResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error
return xxx_messageInfo_ApplyResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -146,7 +149,7 @@ func (m *DiffRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_DiffRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -186,7 +189,7 @@ func (m *DiffResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_DiffResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -313,6 +316,17 @@ type DiffServer interface {
Diff(context.Context, *DiffRequest) (*DiffResponse, error)
}
+// UnimplementedDiffServer can be embedded to have forward compatible implementations.
+type UnimplementedDiffServer struct {
+}
+
+func (*UnimplementedDiffServer) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Apply not implemented")
+}
+func (*UnimplementedDiffServer) Diff(ctx context.Context, req *DiffRequest) (*DiffResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Diff not implemented")
+}
+
func RegisterDiffServer(s *grpc.Server, srv DiffServer) {
s.RegisterService(&_Diff_serviceDesc, srv)
}
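
UnimplementedDiffServer is the forward-compatibility stub the newer grpc code generation emits: embed it in a concrete server and any RPC added to the service later is answered with codes.Unimplemented instead of breaking the build. A hedged sketch of how a consumer might embed it (the diffService type and its Apply body are illustrative, not containerd's implementation):

    // Hypothetical server embedding the generated forward-compatibility stub.
    // Only Apply is overridden; any RPC added to the Diff service later would be
    // answered by the embedded stub rather than failing to compile.
    package diffservice

    import (
        "context"

        diffapi "github.com/containerd/containerd/api/services/diff/v1"
    )

    type diffService struct {
        diffapi.UnimplementedDiffServer
    }

    func (s *diffService) Apply(ctx context.Context, req *diffapi.ApplyRequest) (*diffapi.ApplyResponse, error) {
        // Real work would go here; an empty response keeps the sketch short.
        return &diffapi.ApplyResponse{}, nil
    }
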
@@ -373,7 +387,7 @@ var _Diff_serviceDesc = grpc.ServiceDesc{
func (m *ApplyRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -381,70 +395,78 @@ func (m *ApplyRequest) Marshal() (dAtA []byte, err error) {
}
func (m *ApplyRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ApplyRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if m.Diff != nil {
- dAtA[i] = 0xa
- i++
- i = encodeVarintDiff(dAtA, i, uint64(m.Diff.Size()))
- n1, err := m.Diff.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n1
- }
- if len(m.Mounts) > 0 {
- for _, msg := range m.Mounts {
- dAtA[i] = 0x12
- i++
- i = encodeVarintDiff(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n
- }
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Payloads) > 0 {
- for k, _ := range m.Payloads {
- dAtA[i] = 0x1a
- i++
+ for k := range m.Payloads {
v := m.Payloads[k]
- msgSize := 0
+ baseI := i
if v != nil {
- msgSize = v.Size()
- msgSize += 1 + sovDiff(uint64(msgSize))
+ {
+ size, err := v.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintDiff(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
}
- mapSize := 1 + len(k) + sovDiff(uint64(len(k))) + msgSize
- i = encodeVarintDiff(dAtA, i, uint64(mapSize))
- dAtA[i] = 0xa
- i++
+ i -= len(k)
+ copy(dAtA[i:], k)
i = encodeVarintDiff(dAtA, i, uint64(len(k)))
- i += copy(dAtA[i:], k)
- if v != nil {
- dAtA[i] = 0x12
- i++
- i = encodeVarintDiff(dAtA, i, uint64(v.Size()))
- n2, err := v.MarshalTo(dAtA[i:])
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintDiff(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.Mounts) > 0 {
+ for iNdEx := len(m.Mounts) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Mounts[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
- i += n2
+ i -= size
+ i = encodeVarintDiff(dAtA, i, uint64(size))
}
+ i--
+ dAtA[i] = 0x12
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if m.Diff != nil {
+ {
+ size, err := m.Diff.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintDiff(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ApplyResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -452,30 +474,38 @@ func (m *ApplyResponse) Marshal() (dAtA []byte, err error) {
}
func (m *ApplyResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ApplyResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if m.Applied != nil {
- dAtA[i] = 0xa
- i++
- i = encodeVarintDiff(dAtA, i, uint64(m.Applied.Size()))
- n3, err := m.Applied.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ {
+ size, err := m.Applied.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintDiff(dAtA, i, uint64(size))
}
- i += n3
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *DiffRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -483,73 +513,87 @@ func (m *DiffRequest) Marshal() (dAtA []byte, err error) {
}
func (m *DiffRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DiffRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Left) > 0 {
- for _, msg := range m.Left {
- dAtA[i] = 0xa
- i++
- i = encodeVarintDiff(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n
- }
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- if len(m.Right) > 0 {
- for _, msg := range m.Right {
+ if len(m.Labels) > 0 {
+ for k := range m.Labels {
+ v := m.Labels[k]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintDiff(dAtA, i, uint64(len(v)))
+ i--
dAtA[i] = 0x12
- i++
- i = encodeVarintDiff(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n
+ i -= len(k)
+ copy(dAtA[i:], k)
+ i = encodeVarintDiff(dAtA, i, uint64(len(k)))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintDiff(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x2a
}
}
- if len(m.MediaType) > 0 {
- dAtA[i] = 0x1a
- i++
- i = encodeVarintDiff(dAtA, i, uint64(len(m.MediaType)))
- i += copy(dAtA[i:], m.MediaType)
- }
if len(m.Ref) > 0 {
- dAtA[i] = 0x22
- i++
+ i -= len(m.Ref)
+ copy(dAtA[i:], m.Ref)
i = encodeVarintDiff(dAtA, i, uint64(len(m.Ref)))
- i += copy(dAtA[i:], m.Ref)
+ i--
+ dAtA[i] = 0x22
}
- if len(m.Labels) > 0 {
- for k, _ := range m.Labels {
- dAtA[i] = 0x2a
- i++
- v := m.Labels[k]
- mapSize := 1 + len(k) + sovDiff(uint64(len(k))) + 1 + len(v) + sovDiff(uint64(len(v)))
- i = encodeVarintDiff(dAtA, i, uint64(mapSize))
- dAtA[i] = 0xa
- i++
- i = encodeVarintDiff(dAtA, i, uint64(len(k)))
- i += copy(dAtA[i:], k)
+ if len(m.MediaType) > 0 {
+ i -= len(m.MediaType)
+ copy(dAtA[i:], m.MediaType)
+ i = encodeVarintDiff(dAtA, i, uint64(len(m.MediaType)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Right) > 0 {
+ for iNdEx := len(m.Right) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Right[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintDiff(dAtA, i, uint64(size))
+ }
+ i--
dAtA[i] = 0x12
- i++
- i = encodeVarintDiff(dAtA, i, uint64(len(v)))
- i += copy(dAtA[i:], v)
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Left) > 0 {
+ for iNdEx := len(m.Left) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Left[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintDiff(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *DiffResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -557,34 +601,44 @@ func (m *DiffResponse) Marshal() (dAtA []byte, err error) {
}
func (m *DiffResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DiffResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if m.Diff != nil {
- dAtA[i] = 0x1a
- i++
- i = encodeVarintDiff(dAtA, i, uint64(m.Diff.Size()))
- n4, err := m.Diff.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ {
+ size, err := m.Diff.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintDiff(dAtA, i, uint64(size))
}
- i += n4
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0x1a
}
- return i, nil
+ return len(dAtA) - i, nil
}
func encodeVarintDiff(dAtA []byte, offset int, v uint64) int {
+ offset -= sovDiff(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func (m *ApplyRequest) Size() (n int) {
if m == nil {
@@ -694,14 +748,7 @@ func (m *DiffResponse) Size() (n int) {
}
func sovDiff(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozDiff(x uint64) (n int) {
return sovDiff(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -710,6 +757,11 @@ func (this *ApplyRequest) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForMounts := "[]*Mount{"
+ for _, f := range this.Mounts {
+ repeatedStringForMounts += strings.Replace(fmt.Sprintf("%v", f), "Mount", "types.Mount", 1) + ","
+ }
+ repeatedStringForMounts += "}"
keysForPayloads := make([]string, 0, len(this.Payloads))
for k, _ := range this.Payloads {
keysForPayloads = append(keysForPayloads, k)
@@ -722,7 +774,7 @@ func (this *ApplyRequest) String() string {
mapStringForPayloads += "}"
s := strings.Join([]string{`&ApplyRequest{`,
`Diff:` + strings.Replace(fmt.Sprintf("%v", this.Diff), "Descriptor", "types.Descriptor", 1) + `,`,
- `Mounts:` + strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "types.Mount", 1) + `,`,
+ `Mounts:` + repeatedStringForMounts + `,`,
`Payloads:` + mapStringForPayloads + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
@@ -744,6 +796,16 @@ func (this *DiffRequest) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForLeft := "[]*Mount{"
+ for _, f := range this.Left {
+ repeatedStringForLeft += strings.Replace(fmt.Sprintf("%v", f), "Mount", "types.Mount", 1) + ","
+ }
+ repeatedStringForLeft += "}"
+ repeatedStringForRight := "[]*Mount{"
+ for _, f := range this.Right {
+ repeatedStringForRight += strings.Replace(fmt.Sprintf("%v", f), "Mount", "types.Mount", 1) + ","
+ }
+ repeatedStringForRight += "}"
keysForLabels := make([]string, 0, len(this.Labels))
for k, _ := range this.Labels {
keysForLabels = append(keysForLabels, k)
@@ -755,8 +817,8 @@ func (this *DiffRequest) String() string {
}
mapStringForLabels += "}"
s := strings.Join([]string{`&DiffRequest{`,
- `Left:` + strings.Replace(fmt.Sprintf("%v", this.Left), "Mount", "types.Mount", 1) + `,`,
- `Right:` + strings.Replace(fmt.Sprintf("%v", this.Right), "Mount", "types.Mount", 1) + `,`,
+ `Left:` + repeatedStringForLeft + `,`,
+ `Right:` + repeatedStringForRight + `,`,
`MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`,
`Ref:` + fmt.Sprintf("%v", this.Ref) + `,`,
`Labels:` + mapStringForLabels + `,`,
@@ -1533,6 +1595,7 @@ func (m *DiffResponse) Unmarshal(dAtA []byte) error {
func skipDiff(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -1564,10 +1627,8 @@ func skipDiff(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -1588,55 +1649,30 @@ func skipDiff(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthDiff
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthDiff
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowDiff
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipDiff(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthDiff
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupDiff
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthDiff
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthDiff = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowDiff = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthDiff = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowDiff = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupDiff = fmt.Errorf("proto: unexpected end of group")
)
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go b/vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go
index dcaebbf2d..a1674f862 100644
--- a/vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go
@@ -11,8 +11,11 @@ import (
github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
types "github.com/gogo/protobuf/types"
grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
time "time"
@@ -28,7 +31,7 @@ var _ = time.Kitchen
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type PublishRequest struct {
Topic string `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"`
@@ -51,7 +54,7 @@ func (m *PublishRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro
return xxx_messageInfo_PublishRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -90,7 +93,7 @@ func (m *ForwardRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro
return xxx_messageInfo_ForwardRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -129,7 +132,7 @@ func (m *SubscribeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, er
return xxx_messageInfo_SubscribeRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -171,7 +174,7 @@ func (m *Envelope) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Envelope.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -377,6 +380,20 @@ type EventsServer interface {
Subscribe(*SubscribeRequest, Events_SubscribeServer) error
}
+// UnimplementedEventsServer can be embedded to have forward compatible implementations.
+type UnimplementedEventsServer struct {
+}
+
+func (*UnimplementedEventsServer) Publish(ctx context.Context, req *PublishRequest) (*types.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Publish not implemented")
+}
+func (*UnimplementedEventsServer) Forward(ctx context.Context, req *ForwardRequest) (*types.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Forward not implemented")
+}
+func (*UnimplementedEventsServer) Subscribe(req *SubscribeRequest, srv Events_SubscribeServer) error {
+ return status.Errorf(codes.Unimplemented, "method Subscribe not implemented")
+}
+
func RegisterEventsServer(s *grpc.Server, srv EventsServer) {
s.RegisterService(&_Events_serviceDesc, srv)
}
@@ -464,7 +481,7 @@ var _Events_serviceDesc = grpc.ServiceDesc{
func (m *PublishRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -472,36 +489,45 @@ func (m *PublishRequest) Marshal() (dAtA []byte, err error) {
}
func (m *PublishRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PublishRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Topic) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintEvents(dAtA, i, uint64(len(m.Topic)))
- i += copy(dAtA[i:], m.Topic)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if m.Event != nil {
- dAtA[i] = 0x12
- i++
- i = encodeVarintEvents(dAtA, i, uint64(m.Event.Size()))
- n1, err := m.Event.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ {
+ size, err := m.Event.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintEvents(dAtA, i, uint64(size))
}
- i += n1
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Topic) > 0 {
+ i -= len(m.Topic)
+ copy(dAtA[i:], m.Topic)
+ i = encodeVarintEvents(dAtA, i, uint64(len(m.Topic)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ForwardRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -509,30 +535,38 @@ func (m *ForwardRequest) Marshal() (dAtA []byte, err error) {
}
func (m *ForwardRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ForwardRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if m.Envelope != nil {
- dAtA[i] = 0xa
- i++
- i = encodeVarintEvents(dAtA, i, uint64(m.Envelope.Size()))
- n2, err := m.Envelope.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ {
+ size, err := m.Envelope.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintEvents(dAtA, i, uint64(size))
}
- i += n2
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *SubscribeRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -540,35 +574,35 @@ func (m *SubscribeRequest) Marshal() (dAtA []byte, err error) {
}
func (m *SubscribeRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SubscribeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Filters) > 0 {
- for _, s := range m.Filters {
+ for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Filters[iNdEx])
+ copy(dAtA[i:], m.Filters[iNdEx])
+ i = encodeVarintEvents(dAtA, i, uint64(len(m.Filters[iNdEx])))
+ i--
dAtA[i] = 0xa
- i++
- l = len(s)
- for l >= 1<<7 {
- dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
- l >>= 7
- i++
- }
- dAtA[i] = uint8(l)
- i++
- i += copy(dAtA[i:], s)
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *Envelope) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -576,54 +610,66 @@ func (m *Envelope) Marshal() (dAtA []byte, err error) {
}
func (m *Envelope) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Envelope) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- dAtA[i] = 0xa
- i++
- i = encodeVarintEvents(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)))
- n3, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i:])
- if err != nil {
- return 0, err
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- i += n3
- if len(m.Namespace) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintEvents(dAtA, i, uint64(len(m.Namespace)))
- i += copy(dAtA[i:], m.Namespace)
+ if m.Event != nil {
+ {
+ size, err := m.Event.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintEvents(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
}
if len(m.Topic) > 0 {
- dAtA[i] = 0x1a
- i++
+ i -= len(m.Topic)
+ copy(dAtA[i:], m.Topic)
i = encodeVarintEvents(dAtA, i, uint64(len(m.Topic)))
- i += copy(dAtA[i:], m.Topic)
+ i--
+ dAtA[i] = 0x1a
}
- if m.Event != nil {
- dAtA[i] = 0x22
- i++
- i = encodeVarintEvents(dAtA, i, uint64(m.Event.Size()))
- n4, err := m.Event.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n4
+ if len(m.Namespace) > 0 {
+ i -= len(m.Namespace)
+ copy(dAtA[i:], m.Namespace)
+ i = encodeVarintEvents(dAtA, i, uint64(len(m.Namespace)))
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ n4, err4 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):])
+ if err4 != nil {
+ return 0, err4
}
- return i, nil
+ i -= n4
+ i = encodeVarintEvents(dAtA, i, uint64(n4))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
func encodeVarintEvents(dAtA []byte, offset int, v uint64) int {
+ offset -= sovEvents(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func (m *PublishRequest) Size() (n int) {
if m == nil {
@@ -706,14 +752,7 @@ func (m *Envelope) Size() (n int) {
}
func sovEvents(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozEvents(x uint64) (n int) {
return sovEvents(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -735,7 +774,7 @@ func (this *ForwardRequest) String() string {
return "nil"
}
s := strings.Join([]string{`&ForwardRequest{`,
- `Envelope:` + strings.Replace(fmt.Sprintf("%v", this.Envelope), "Envelope", "Envelope", 1) + `,`,
+ `Envelope:` + strings.Replace(this.Envelope.String(), "Envelope", "Envelope", 1) + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -757,7 +796,7 @@ func (this *Envelope) String() string {
return "nil"
}
s := strings.Join([]string{`&Envelope{`,
- `Timestamp:` + strings.Replace(strings.Replace(this.Timestamp.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+ `Timestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
`Topic:` + fmt.Sprintf("%v", this.Topic) + `,`,
`Event:` + strings.Replace(fmt.Sprintf("%v", this.Event), "Any", "types.Any", 1) + `,`,
@@ -1262,6 +1301,7 @@ func (m *Envelope) Unmarshal(dAtA []byte) error {
func skipEvents(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -1293,10 +1333,8 @@ func skipEvents(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -1317,55 +1355,30 @@ func skipEvents(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthEvents
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthEvents
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowEvents
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipEvents(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthEvents
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupEvents
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthEvents
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthEvents = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowEvents = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthEvents = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowEvents = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupEvents = fmt.Errorf("proto: unexpected end of group")
)
diff --git a/vendor/github.com/containerd/containerd/api/services/images/v1/images.pb.go b/vendor/github.com/containerd/containerd/api/services/images/v1/images.pb.go
index eaa1d1750..db912b68b 100644
--- a/vendor/github.com/containerd/containerd/api/services/images/v1/images.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/images/v1/images.pb.go
@@ -12,8 +12,11 @@ import (
github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
types1 "github.com/gogo/protobuf/types"
grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
time "time"
@@ -29,7 +32,7 @@ var _ = time.Kitchen
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type Image struct {
// Name provides a unique name for the image.
@@ -66,7 +69,7 @@ func (m *Image) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Image.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -105,7 +108,7 @@ func (m *GetImageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, err
return xxx_messageInfo_GetImageRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -144,7 +147,7 @@ func (m *GetImageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, er
return xxx_messageInfo_GetImageResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -183,7 +186,7 @@ func (m *CreateImageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte,
return xxx_messageInfo_CreateImageRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -222,7 +225,7 @@ func (m *CreateImageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte,
return xxx_messageInfo_CreateImageResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -267,7 +270,7 @@ func (m *UpdateImageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte,
return xxx_messageInfo_UpdateImageRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -306,7 +309,7 @@ func (m *UpdateImageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte,
return xxx_messageInfo_UpdateImageResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -355,7 +358,7 @@ func (m *ListImagesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, e
return xxx_messageInfo_ListImagesRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -394,7 +397,7 @@ func (m *ListImagesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte,
return xxx_messageInfo_ListImagesResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -438,7 +441,7 @@ func (m *DeleteImageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte,
return xxx_messageInfo_DeleteImageRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -618,6 +621,26 @@ type ImagesServer interface {
Delete(context.Context, *DeleteImageRequest) (*types1.Empty, error)
}
+// UnimplementedImagesServer can be embedded to have forward compatible implementations.
+type UnimplementedImagesServer struct {
+}
+
+func (*UnimplementedImagesServer) Get(ctx context.Context, req *GetImageRequest) (*GetImageResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Get not implemented")
+}
+func (*UnimplementedImagesServer) List(ctx context.Context, req *ListImagesRequest) (*ListImagesResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method List not implemented")
+}
+func (*UnimplementedImagesServer) Create(ctx context.Context, req *CreateImageRequest) (*CreateImageResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Create not implemented")
+}
+func (*UnimplementedImagesServer) Update(ctx context.Context, req *UpdateImageRequest) (*UpdateImageResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Update not implemented")
+}
+func (*UnimplementedImagesServer) Delete(ctx context.Context, req *DeleteImageRequest) (*types1.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented")
+}
+
func RegisterImagesServer(s *grpc.Server, srv ImagesServer) {
s.RegisterService(&_Images_serviceDesc, srv)
}
@@ -744,7 +767,7 @@ var _Images_serviceDesc = grpc.ServiceDesc{
func (m *Image) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -752,67 +775,78 @@ func (m *Image) Marshal() (dAtA []byte, err error) {
}
func (m *Image) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Image) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Name) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintImages(dAtA, i, uint64(len(m.Name)))
- i += copy(dAtA[i:], m.Name)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt):])
+ if err1 != nil {
+ return 0, err1
+ }
+ i -= n1
+ i = encodeVarintImages(dAtA, i, uint64(n1))
+ i--
+ dAtA[i] = 0x42
+ n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt):])
+ if err2 != nil {
+ return 0, err2
}
+ i -= n2
+ i = encodeVarintImages(dAtA, i, uint64(n2))
+ i--
+ dAtA[i] = 0x3a
+ {
+ size, err := m.Target.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintImages(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
if len(m.Labels) > 0 {
- for k, _ := range m.Labels {
- dAtA[i] = 0x12
- i++
+ for k := range m.Labels {
v := m.Labels[k]
- mapSize := 1 + len(k) + sovImages(uint64(len(k))) + 1 + len(v) + sovImages(uint64(len(v)))
- i = encodeVarintImages(dAtA, i, uint64(mapSize))
- dAtA[i] = 0xa
- i++
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintImages(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(k)
+ copy(dAtA[i:], k)
i = encodeVarintImages(dAtA, i, uint64(len(k)))
- i += copy(dAtA[i:], k)
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintImages(dAtA, i, uint64(baseI-i))
+ i--
dAtA[i] = 0x12
- i++
- i = encodeVarintImages(dAtA, i, uint64(len(v)))
- i += copy(dAtA[i:], v)
}
}
- dAtA[i] = 0x1a
- i++
- i = encodeVarintImages(dAtA, i, uint64(m.Target.Size()))
- n1, err := m.Target.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n1
- dAtA[i] = 0x3a
- i++
- i = encodeVarintImages(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt)))
- n2, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n2
- dAtA[i] = 0x42
- i++
- i = encodeVarintImages(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)))
- n3, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n3
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintImages(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *GetImageRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -820,26 +854,33 @@ func (m *GetImageRequest) Marshal() (dAtA []byte, err error) {
}
func (m *GetImageRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GetImageRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Name) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
i = encodeVarintImages(dAtA, i, uint64(len(m.Name)))
- i += copy(dAtA[i:], m.Name)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *GetImageResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -847,30 +888,38 @@ func (m *GetImageResponse) Marshal() (dAtA []byte, err error) {
}
func (m *GetImageResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GetImageResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if m.Image != nil {
- dAtA[i] = 0xa
- i++
- i = encodeVarintImages(dAtA, i, uint64(m.Image.Size()))
- n4, err := m.Image.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ {
+ size, err := m.Image.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintImages(dAtA, i, uint64(size))
}
- i += n4
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *CreateImageRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -878,28 +927,36 @@ func (m *CreateImageRequest) Marshal() (dAtA []byte, err error) {
}
func (m *CreateImageRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CreateImageRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- dAtA[i] = 0xa
- i++
- i = encodeVarintImages(dAtA, i, uint64(m.Image.Size()))
- n5, err := m.Image.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n5
if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ {
+ size, err := m.Image.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintImages(dAtA, i, uint64(size))
}
- return i, nil
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
func (m *CreateImageResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -907,28 +964,36 @@ func (m *CreateImageResponse) Marshal() (dAtA []byte, err error) {
}
func (m *CreateImageResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CreateImageResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- dAtA[i] = 0xa
- i++
- i = encodeVarintImages(dAtA, i, uint64(m.Image.Size()))
- n6, err := m.Image.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n6
if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ {
+ size, err := m.Image.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintImages(dAtA, i, uint64(size))
}
- return i, nil
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
func (m *UpdateImageRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -936,38 +1001,48 @@ func (m *UpdateImageRequest) Marshal() (dAtA []byte, err error) {
}
func (m *UpdateImageRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UpdateImageRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- dAtA[i] = 0xa
- i++
- i = encodeVarintImages(dAtA, i, uint64(m.Image.Size()))
- n7, err := m.Image.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- i += n7
if m.UpdateMask != nil {
+ {
+ size, err := m.UpdateMask.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintImages(dAtA, i, uint64(size))
+ }
+ i--
dAtA[i] = 0x12
- i++
- i = encodeVarintImages(dAtA, i, uint64(m.UpdateMask.Size()))
- n8, err := m.UpdateMask.MarshalTo(dAtA[i:])
+ }
+ {
+ size, err := m.Image.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
- i += n8
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i -= size
+ i = encodeVarintImages(dAtA, i, uint64(size))
}
- return i, nil
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
func (m *UpdateImageResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -975,28 +1050,36 @@ func (m *UpdateImageResponse) Marshal() (dAtA []byte, err error) {
}
func (m *UpdateImageResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UpdateImageResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- dAtA[i] = 0xa
- i++
- i = encodeVarintImages(dAtA, i, uint64(m.Image.Size()))
- n9, err := m.Image.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n9
if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ {
+ size, err := m.Image.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintImages(dAtA, i, uint64(size))
}
- return i, nil
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
func (m *ListImagesRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1004,35 +1087,35 @@ func (m *ListImagesRequest) Marshal() (dAtA []byte, err error) {
}
func (m *ListImagesRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ListImagesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Filters) > 0 {
- for _, s := range m.Filters {
+ for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Filters[iNdEx])
+ copy(dAtA[i:], m.Filters[iNdEx])
+ i = encodeVarintImages(dAtA, i, uint64(len(m.Filters[iNdEx])))
+ i--
dAtA[i] = 0xa
- i++
- l = len(s)
- for l >= 1<<7 {
- dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
- l >>= 7
- i++
- }
- dAtA[i] = uint8(l)
- i++
- i += copy(dAtA[i:], s)
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ListImagesResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1040,32 +1123,40 @@ func (m *ListImagesResponse) Marshal() (dAtA []byte, err error) {
}
func (m *ListImagesResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ListImagesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Images) > 0 {
- for _, msg := range m.Images {
- dAtA[i] = 0xa
- i++
- i = encodeVarintImages(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ for iNdEx := len(m.Images) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Images[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintImages(dAtA, i, uint64(size))
}
- i += n
+ i--
+ dAtA[i] = 0xa
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *DeleteImageRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1073,40 +1164,49 @@ func (m *DeleteImageRequest) Marshal() (dAtA []byte, err error) {
}
func (m *DeleteImageRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeleteImageRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Name) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintImages(dAtA, i, uint64(len(m.Name)))
- i += copy(dAtA[i:], m.Name)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if m.Sync {
- dAtA[i] = 0x10
- i++
+ i--
if m.Sync {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
+ i--
+ dAtA[i] = 0x10
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintImages(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func encodeVarintImages(dAtA []byte, offset int, v uint64) int {
+ offset -= sovImages(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func (m *Image) Size() (n int) {
if m == nil {
@@ -1286,14 +1386,7 @@ func (m *DeleteImageRequest) Size() (n int) {
}
func sovImages(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozImages(x uint64) (n int) {
return sovImages(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -1315,9 +1408,9 @@ func (this *Image) String() string {
s := strings.Join([]string{`&Image{`,
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
`Labels:` + mapStringForLabels + `,`,
- `Target:` + strings.Replace(strings.Replace(this.Target.String(), "Descriptor", "types.Descriptor", 1), `&`, ``, 1) + `,`,
- `CreatedAt:` + strings.Replace(strings.Replace(this.CreatedAt.String(), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
- `UpdatedAt:` + strings.Replace(strings.Replace(this.UpdatedAt.String(), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
+ `Target:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Target), "Descriptor", "types.Descriptor", 1), `&`, ``, 1) + `,`,
+ `CreatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CreatedAt), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
+ `UpdatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UpdatedAt), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -1339,7 +1432,7 @@ func (this *GetImageResponse) String() string {
return "nil"
}
s := strings.Join([]string{`&GetImageResponse{`,
- `Image:` + strings.Replace(fmt.Sprintf("%v", this.Image), "Image", "Image", 1) + `,`,
+ `Image:` + strings.Replace(this.Image.String(), "Image", "Image", 1) + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -1405,8 +1498,13 @@ func (this *ListImagesResponse) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForImages := "[]Image{"
+ for _, f := range this.Images {
+ repeatedStringForImages += strings.Replace(strings.Replace(f.String(), "Image", "Image", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForImages += "}"
s := strings.Join([]string{`&ListImagesResponse{`,
- `Images:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Images), "Image", "Image", 1), `&`, ``, 1) + `,`,
+ `Images:` + repeatedStringForImages + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -2587,6 +2685,7 @@ func (m *DeleteImageRequest) Unmarshal(dAtA []byte) error {
func skipImages(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -2618,10 +2717,8 @@ func skipImages(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -2642,55 +2739,30 @@ func skipImages(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthImages
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthImages
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowImages
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipImages(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthImages
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupImages
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthImages
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthImages = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowImages = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthImages = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowImages = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupImages = fmt.Errorf("proto: unexpected end of group")
)
diff --git a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go
index a4e238685..b9f912b09 100644
--- a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go
@@ -12,8 +12,11 @@ import (
github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
types1 "github.com/gogo/protobuf/types"
grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
)
@@ -27,7 +30,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type Plugin struct {
// Type defines the type of plugin.
@@ -85,7 +88,7 @@ func (m *Plugin) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Plugin.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -134,7 +137,7 @@ func (m *PluginsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro
return xxx_messageInfo_PluginsRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -173,7 +176,7 @@ func (m *PluginsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, err
return xxx_messageInfo_PluginsResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -212,7 +215,7 @@ func (m *ServerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro
return xxx_messageInfo_ServerResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -340,6 +343,17 @@ type IntrospectionServer interface {
Server(context.Context, *types1.Empty) (*ServerResponse, error)
}
+// UnimplementedIntrospectionServer can be embedded to have forward compatible implementations.
+type UnimplementedIntrospectionServer struct {
+}
+
+func (*UnimplementedIntrospectionServer) Plugins(ctx context.Context, req *PluginsRequest) (*PluginsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Plugins not implemented")
+}
+func (*UnimplementedIntrospectionServer) Server(ctx context.Context, req *types1.Empty) (*ServerResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Server not implemented")
+}
+
func RegisterIntrospectionServer(s *grpc.Server, srv IntrospectionServer) {
s.RegisterService(&_Introspection_serviceDesc, srv)
}
@@ -400,7 +414,7 @@ var _Introspection_serviceDesc = grpc.ServiceDesc{
func (m *Plugin) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -408,101 +422,103 @@ func (m *Plugin) Marshal() (dAtA []byte, err error) {
}
func (m *Plugin) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Plugin) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Type) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Type)))
- i += copy(dAtA[i:], m.Type)
- }
- if len(m.ID) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintIntrospection(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
- }
- if len(m.Requires) > 0 {
- for _, s := range m.Requires {
- dAtA[i] = 0x1a
- i++
- l = len(s)
- for l >= 1<<7 {
- dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
- l >>= 7
- i++
- }
- dAtA[i] = uint8(l)
- i++
- i += copy(dAtA[i:], s)
- }
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- if len(m.Platforms) > 0 {
- for _, msg := range m.Platforms {
- dAtA[i] = 0x22
- i++
- i = encodeVarintIntrospection(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
+ if m.InitErr != nil {
+ {
+ size, err := m.InitErr.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
- i += n
+ i -= size
+ i = encodeVarintIntrospection(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
+ if len(m.Capabilities) > 0 {
+ for iNdEx := len(m.Capabilities) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Capabilities[iNdEx])
+ copy(dAtA[i:], m.Capabilities[iNdEx])
+ i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Capabilities[iNdEx])))
+ i--
+ dAtA[i] = 0x32
}
}
if len(m.Exports) > 0 {
- for k, _ := range m.Exports {
- dAtA[i] = 0x2a
- i++
+ for k := range m.Exports {
v := m.Exports[k]
- mapSize := 1 + len(k) + sovIntrospection(uint64(len(k))) + 1 + len(v) + sovIntrospection(uint64(len(v)))
- i = encodeVarintIntrospection(dAtA, i, uint64(mapSize))
- dAtA[i] = 0xa
- i++
- i = encodeVarintIntrospection(dAtA, i, uint64(len(k)))
- i += copy(dAtA[i:], k)
- dAtA[i] = 0x12
- i++
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
i = encodeVarintIntrospection(dAtA, i, uint64(len(v)))
- i += copy(dAtA[i:], v)
+ i--
+ dAtA[i] = 0x12
+ i -= len(k)
+ copy(dAtA[i:], k)
+ i = encodeVarintIntrospection(dAtA, i, uint64(len(k)))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintIntrospection(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x2a
}
}
- if len(m.Capabilities) > 0 {
- for _, s := range m.Capabilities {
- dAtA[i] = 0x32
- i++
- l = len(s)
- for l >= 1<<7 {
- dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
- l >>= 7
- i++
- }
- dAtA[i] = uint8(l)
- i++
- i += copy(dAtA[i:], s)
+ if len(m.Platforms) > 0 {
+ for iNdEx := len(m.Platforms) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Platforms[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintIntrospection(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
}
}
- if m.InitErr != nil {
- dAtA[i] = 0x3a
- i++
- i = encodeVarintIntrospection(dAtA, i, uint64(m.InitErr.Size()))
- n1, err := m.InitErr.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ if len(m.Requires) > 0 {
+ for iNdEx := len(m.Requires) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Requires[iNdEx])
+ copy(dAtA[i:], m.Requires[iNdEx])
+ i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Requires[iNdEx])))
+ i--
+ dAtA[i] = 0x1a
}
- i += n1
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ID) > 0 {
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintIntrospection(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Type) > 0 {
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *PluginsRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -510,35 +526,35 @@ func (m *PluginsRequest) Marshal() (dAtA []byte, err error) {
}
func (m *PluginsRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PluginsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Filters) > 0 {
- for _, s := range m.Filters {
+ for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Filters[iNdEx])
+ copy(dAtA[i:], m.Filters[iNdEx])
+ i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Filters[iNdEx])))
+ i--
dAtA[i] = 0xa
- i++
- l = len(s)
- for l >= 1<<7 {
- dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
- l >>= 7
- i++
- }
- dAtA[i] = uint8(l)
- i++
- i += copy(dAtA[i:], s)
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *PluginsResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -546,32 +562,40 @@ func (m *PluginsResponse) Marshal() (dAtA []byte, err error) {
}
func (m *PluginsResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PluginsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Plugins) > 0 {
- for _, msg := range m.Plugins {
- dAtA[i] = 0xa
- i++
- i = encodeVarintIntrospection(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ for iNdEx := len(m.Plugins) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Plugins[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintIntrospection(dAtA, i, uint64(size))
}
- i += n
+ i--
+ dAtA[i] = 0xa
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ServerResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -579,30 +603,39 @@ func (m *ServerResponse) Marshal() (dAtA []byte, err error) {
}
func (m *ServerResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ServerResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.UUID) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.UUID)
+ copy(dAtA[i:], m.UUID)
i = encodeVarintIntrospection(dAtA, i, uint64(len(m.UUID)))
- i += copy(dAtA[i:], m.UUID)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func encodeVarintIntrospection(dAtA []byte, offset int, v uint64) int {
+ offset -= sovIntrospection(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func (m *Plugin) Size() (n int) {
if m == nil {
@@ -707,14 +740,7 @@ func (m *ServerResponse) Size() (n int) {
}
func sovIntrospection(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozIntrospection(x uint64) (n int) {
return sovIntrospection(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -723,6 +749,11 @@ func (this *Plugin) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForPlatforms := "[]Platform{"
+ for _, f := range this.Platforms {
+ repeatedStringForPlatforms += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForPlatforms += "}"
keysForExports := make([]string, 0, len(this.Exports))
for k, _ := range this.Exports {
keysForExports = append(keysForExports, k)
@@ -737,7 +768,7 @@ func (this *Plugin) String() string {
`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
`Requires:` + fmt.Sprintf("%v", this.Requires) + `,`,
- `Platforms:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Platforms), "Platform", "types.Platform", 1), `&`, ``, 1) + `,`,
+ `Platforms:` + repeatedStringForPlatforms + `,`,
`Exports:` + mapStringForExports + `,`,
`Capabilities:` + fmt.Sprintf("%v", this.Capabilities) + `,`,
`InitErr:` + strings.Replace(fmt.Sprintf("%v", this.InitErr), "Status", "rpc.Status", 1) + `,`,
@@ -761,8 +792,13 @@ func (this *PluginsResponse) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForPlugins := "[]Plugin{"
+ for _, f := range this.Plugins {
+ repeatedStringForPlugins += strings.Replace(strings.Replace(f.String(), "Plugin", "Plugin", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForPlugins += "}"
s := strings.Join([]string{`&PluginsResponse{`,
- `Plugins:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Plugins), "Plugin", "Plugin", 1), `&`, ``, 1) + `,`,
+ `Plugins:` + repeatedStringForPlugins + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -1429,6 +1465,7 @@ func (m *ServerResponse) Unmarshal(dAtA []byte) error {
func skipIntrospection(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -1460,10 +1497,8 @@ func skipIntrospection(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -1484,55 +1519,30 @@ func skipIntrospection(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthIntrospection
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthIntrospection
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipIntrospection(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthIntrospection
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupIntrospection
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthIntrospection
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthIntrospection = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowIntrospection = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthIntrospection = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowIntrospection = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupIntrospection = fmt.Errorf("proto: unexpected end of group")
)
diff --git a/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go b/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go
index 9a3313ef5..4dbac3e09 100644
--- a/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go
@@ -11,8 +11,11 @@ import (
github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
types "github.com/gogo/protobuf/types"
grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
time "time"
@@ -28,7 +31,7 @@ var _ = time.Kitchen
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// Lease is an object which retains resources while it exists.
type Lease struct {
@@ -53,7 +56,7 @@ func (m *Lease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Lease.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -95,7 +98,7 @@ func (m *CreateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error
return xxx_messageInfo_CreateRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -134,7 +137,7 @@ func (m *CreateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro
return xxx_messageInfo_CreateResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -178,7 +181,7 @@ func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error
return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -217,7 +220,7 @@ func (m *ListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_ListRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -256,7 +259,7 @@ func (m *ListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_ListResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -299,7 +302,7 @@ func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Resource.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -339,7 +342,7 @@ func (m *AddResourceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte,
return xxx_messageInfo_AddResourceRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -379,7 +382,7 @@ func (m *DeleteResourceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byt
return xxx_messageInfo_DeleteResourceRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -418,7 +421,7 @@ func (m *ListResourcesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte
return xxx_messageInfo_ListResourcesRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -457,7 +460,7 @@ func (m *ListResourcesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byt
return xxx_messageInfo_ListResourcesResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -653,6 +656,29 @@ type LeasesServer interface {
ListResources(context.Context, *ListResourcesRequest) (*ListResourcesResponse, error)
}
+// UnimplementedLeasesServer can be embedded to have forward compatible implementations.
+type UnimplementedLeasesServer struct {
+}
+
+func (*UnimplementedLeasesServer) Create(ctx context.Context, req *CreateRequest) (*CreateResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Create not implemented")
+}
+func (*UnimplementedLeasesServer) Delete(ctx context.Context, req *DeleteRequest) (*types.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented")
+}
+func (*UnimplementedLeasesServer) List(ctx context.Context, req *ListRequest) (*ListResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method List not implemented")
+}
+func (*UnimplementedLeasesServer) AddResource(ctx context.Context, req *AddResourceRequest) (*types.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method AddResource not implemented")
+}
+func (*UnimplementedLeasesServer) DeleteResource(ctx context.Context, req *DeleteResourceRequest) (*types.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DeleteResource not implemented")
+}
+func (*UnimplementedLeasesServer) ListResources(ctx context.Context, req *ListResourcesRequest) (*ListResourcesResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListResources not implemented")
+}
+
func RegisterLeasesServer(s *grpc.Server, srv LeasesServer) {
s.RegisterService(&_Leases_serviceDesc, srv)
}
@@ -801,7 +827,7 @@ var _Leases_serviceDesc = grpc.ServiceDesc{
func (m *Lease) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -809,51 +835,60 @@ func (m *Lease) Marshal() (dAtA []byte, err error) {
}
func (m *Lease) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintLeases(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
- }
- dAtA[i] = 0x12
- i++
- i = encodeVarintLeases(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt)))
- n1, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i:])
- if err != nil {
- return 0, err
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- i += n1
if len(m.Labels) > 0 {
- for k, _ := range m.Labels {
- dAtA[i] = 0x1a
- i++
+ for k := range m.Labels {
v := m.Labels[k]
- mapSize := 1 + len(k) + sovLeases(uint64(len(k))) + 1 + len(v) + sovLeases(uint64(len(v)))
- i = encodeVarintLeases(dAtA, i, uint64(mapSize))
- dAtA[i] = 0xa
- i++
- i = encodeVarintLeases(dAtA, i, uint64(len(k)))
- i += copy(dAtA[i:], k)
- dAtA[i] = 0x12
- i++
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
i = encodeVarintLeases(dAtA, i, uint64(len(v)))
- i += copy(dAtA[i:], v)
+ i--
+ dAtA[i] = 0x12
+ i -= len(k)
+ copy(dAtA[i:], k)
+ i = encodeVarintLeases(dAtA, i, uint64(len(k)))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintLeases(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x1a
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt):])
+ if err1 != nil {
+ return 0, err1
}
- return i, nil
+ i -= n1
+ i = encodeVarintLeases(dAtA, i, uint64(n1))
+ i--
+ dAtA[i] = 0x12
+ if len(m.ID) > 0 {
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintLeases(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
}
func (m *CreateRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -861,43 +896,52 @@ func (m *CreateRequest) Marshal() (dAtA []byte, err error) {
}
func (m *CreateRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CreateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintLeases(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Labels) > 0 {
- for k, _ := range m.Labels {
- dAtA[i] = 0x1a
- i++
+ for k := range m.Labels {
v := m.Labels[k]
- mapSize := 1 + len(k) + sovLeases(uint64(len(k))) + 1 + len(v) + sovLeases(uint64(len(v)))
- i = encodeVarintLeases(dAtA, i, uint64(mapSize))
- dAtA[i] = 0xa
- i++
- i = encodeVarintLeases(dAtA, i, uint64(len(k)))
- i += copy(dAtA[i:], k)
- dAtA[i] = 0x12
- i++
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
i = encodeVarintLeases(dAtA, i, uint64(len(v)))
- i += copy(dAtA[i:], v)
+ i--
+ dAtA[i] = 0x12
+ i -= len(k)
+ copy(dAtA[i:], k)
+ i = encodeVarintLeases(dAtA, i, uint64(len(k)))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintLeases(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x1a
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ID) > 0 {
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintLeases(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *CreateResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -905,30 +949,38 @@ func (m *CreateResponse) Marshal() (dAtA []byte, err error) {
}
func (m *CreateResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CreateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if m.Lease != nil {
- dAtA[i] = 0xa
- i++
- i = encodeVarintLeases(dAtA, i, uint64(m.Lease.Size()))
- n2, err := m.Lease.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ {
+ size, err := m.Lease.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintLeases(dAtA, i, uint64(size))
}
- i += n2
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *DeleteRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -936,36 +988,43 @@ func (m *DeleteRequest) Marshal() (dAtA []byte, err error) {
}
func (m *DeleteRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeleteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintLeases(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if m.Sync {
- dAtA[i] = 0x10
- i++
+ i--
if m.Sync {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
+ i--
+ dAtA[i] = 0x10
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ID) > 0 {
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintLeases(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ListRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -973,35 +1032,35 @@ func (m *ListRequest) Marshal() (dAtA []byte, err error) {
}
func (m *ListRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Filters) > 0 {
- for _, s := range m.Filters {
+ for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Filters[iNdEx])
+ copy(dAtA[i:], m.Filters[iNdEx])
+ i = encodeVarintLeases(dAtA, i, uint64(len(m.Filters[iNdEx])))
+ i--
dAtA[i] = 0xa
- i++
- l = len(s)
- for l >= 1<<7 {
- dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
- l >>= 7
- i++
- }
- dAtA[i] = uint8(l)
- i++
- i += copy(dAtA[i:], s)
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ListResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1009,32 +1068,40 @@ func (m *ListResponse) Marshal() (dAtA []byte, err error) {
}
func (m *ListResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Leases) > 0 {
- for _, msg := range m.Leases {
- dAtA[i] = 0xa
- i++
- i = encodeVarintLeases(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ for iNdEx := len(m.Leases) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Leases[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintLeases(dAtA, i, uint64(size))
}
- i += n
+ i--
+ dAtA[i] = 0xa
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *Resource) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1042,32 +1109,40 @@ func (m *Resource) Marshal() (dAtA []byte, err error) {
}
func (m *Resource) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Resource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintLeases(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Type) > 0 {
- dAtA[i] = 0x12
- i++
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
i = encodeVarintLeases(dAtA, i, uint64(len(m.Type)))
- i += copy(dAtA[i:], m.Type)
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ID) > 0 {
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintLeases(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *AddResourceRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1075,34 +1150,43 @@ func (m *AddResourceRequest) Marshal() (dAtA []byte, err error) {
}
func (m *AddResourceRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AddResourceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintLeases(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- dAtA[i] = 0x12
- i++
- i = encodeVarintLeases(dAtA, i, uint64(m.Resource.Size()))
- n3, err := m.Resource.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ {
+ size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintLeases(dAtA, i, uint64(size))
}
- i += n3
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0x12
+ if len(m.ID) > 0 {
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintLeases(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *DeleteResourceRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1110,34 +1194,43 @@ func (m *DeleteResourceRequest) Marshal() (dAtA []byte, err error) {
}
func (m *DeleteResourceRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeleteResourceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintLeases(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- dAtA[i] = 0x12
- i++
- i = encodeVarintLeases(dAtA, i, uint64(m.Resource.Size()))
- n4, err := m.Resource.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ {
+ size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintLeases(dAtA, i, uint64(size))
}
- i += n4
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0x12
+ if len(m.ID) > 0 {
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintLeases(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ListResourcesRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1145,26 +1238,33 @@ func (m *ListResourcesRequest) Marshal() (dAtA []byte, err error) {
}
func (m *ListResourcesRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ListResourcesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
i = encodeVarintLeases(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ListResourcesResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1172,36 +1272,46 @@ func (m *ListResourcesResponse) Marshal() (dAtA []byte, err error) {
}
func (m *ListResourcesResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ListResourcesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Resources) > 0 {
- for _, msg := range m.Resources {
- dAtA[i] = 0xa
- i++
- i = encodeVarintLeases(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintLeases(dAtA, i, uint64(size))
}
- i += n
+ i--
+ dAtA[i] = 0xa
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func encodeVarintLeases(dAtA []byte, offset int, v uint64) int {
+ offset -= sovLeases(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func (m *Lease) Size() (n int) {
if m == nil {
@@ -1415,14 +1525,7 @@ func (m *ListResourcesResponse) Size() (n int) {
}
func sovLeases(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozLeases(x uint64) (n int) {
return sovLeases(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -1443,7 +1546,7 @@ func (this *Lease) String() string {
mapStringForLabels += "}"
s := strings.Join([]string{`&Lease{`,
`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
- `CreatedAt:` + strings.Replace(strings.Replace(this.CreatedAt.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+ `CreatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CreatedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
`Labels:` + mapStringForLabels + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
@@ -1477,7 +1580,7 @@ func (this *CreateResponse) String() string {
return "nil"
}
s := strings.Join([]string{`&CreateResponse{`,
- `Lease:` + strings.Replace(fmt.Sprintf("%v", this.Lease), "Lease", "Lease", 1) + `,`,
+ `Lease:` + strings.Replace(this.Lease.String(), "Lease", "Lease", 1) + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -1510,8 +1613,13 @@ func (this *ListResponse) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForLeases := "[]*Lease{"
+ for _, f := range this.Leases {
+ repeatedStringForLeases += strings.Replace(f.String(), "Lease", "Lease", 1) + ","
+ }
+ repeatedStringForLeases += "}"
s := strings.Join([]string{`&ListResponse{`,
- `Leases:` + strings.Replace(fmt.Sprintf("%v", this.Leases), "Lease", "Lease", 1) + `,`,
+ `Leases:` + repeatedStringForLeases + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -1568,8 +1676,13 @@ func (this *ListResourcesResponse) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForResources := "[]Resource{"
+ for _, f := range this.Resources {
+ repeatedStringForResources += strings.Replace(strings.Replace(f.String(), "Resource", "Resource", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForResources += "}"
s := strings.Join([]string{`&ListResourcesResponse{`,
- `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "Resource", "Resource", 1), `&`, ``, 1) + `,`,
+ `Resources:` + repeatedStringForResources + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -2945,6 +3058,7 @@ func (m *ListResourcesResponse) Unmarshal(dAtA []byte) error {
func skipLeases(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -2976,10 +3090,8 @@ func skipLeases(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -3000,55 +3112,30 @@ func skipLeases(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthLeases
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthLeases
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowLeases
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipLeases(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthLeases
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupLeases
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthLeases
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthLeases = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowLeases = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthLeases = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowLeases = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupLeases = fmt.Errorf("proto: unexpected end of group")
)
diff --git a/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.pb.go b/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.pb.go
index 3c6cdd7af..0d1d650ba 100644
--- a/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.pb.go
@@ -10,8 +10,11 @@ import (
github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
types "github.com/gogo/protobuf/types"
grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
)
@@ -25,7 +28,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type Namespace struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
@@ -54,7 +57,7 @@ func (m *Namespace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Namespace.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -93,7 +96,7 @@ func (m *GetNamespaceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte,
return xxx_messageInfo_GetNamespaceRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -132,7 +135,7 @@ func (m *GetNamespaceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte
return xxx_messageInfo_GetNamespaceResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -171,7 +174,7 @@ func (m *ListNamespacesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byt
return xxx_messageInfo_ListNamespacesRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -210,7 +213,7 @@ func (m *ListNamespacesResponse) XXX_Marshal(b []byte, deterministic bool) ([]by
return xxx_messageInfo_ListNamespacesResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -249,7 +252,7 @@ func (m *CreateNamespaceRequest) XXX_Marshal(b []byte, deterministic bool) ([]by
return xxx_messageInfo_CreateNamespaceRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -288,7 +291,7 @@ func (m *CreateNamespaceResponse) XXX_Marshal(b []byte, deterministic bool) ([]b
return xxx_messageInfo_CreateNamespaceResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -342,7 +345,7 @@ func (m *UpdateNamespaceRequest) XXX_Marshal(b []byte, deterministic bool) ([]by
return xxx_messageInfo_UpdateNamespaceRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -381,7 +384,7 @@ func (m *UpdateNamespaceResponse) XXX_Marshal(b []byte, deterministic bool) ([]b
return xxx_messageInfo_UpdateNamespaceResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -420,7 +423,7 @@ func (m *DeleteNamespaceRequest) XXX_Marshal(b []byte, deterministic bool) ([]by
return xxx_messageInfo_DeleteNamespaceRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -577,6 +580,26 @@ type NamespacesServer interface {
Delete(context.Context, *DeleteNamespaceRequest) (*types.Empty, error)
}
+// UnimplementedNamespacesServer can be embedded to have forward compatible implementations.
+type UnimplementedNamespacesServer struct {
+}
+
+func (*UnimplementedNamespacesServer) Get(ctx context.Context, req *GetNamespaceRequest) (*GetNamespaceResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Get not implemented")
+}
+func (*UnimplementedNamespacesServer) List(ctx context.Context, req *ListNamespacesRequest) (*ListNamespacesResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method List not implemented")
+}
+func (*UnimplementedNamespacesServer) Create(ctx context.Context, req *CreateNamespaceRequest) (*CreateNamespaceResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Create not implemented")
+}
+func (*UnimplementedNamespacesServer) Update(ctx context.Context, req *UpdateNamespaceRequest) (*UpdateNamespaceResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Update not implemented")
+}
+func (*UnimplementedNamespacesServer) Delete(ctx context.Context, req *DeleteNamespaceRequest) (*types.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented")
+}
+
func RegisterNamespacesServer(s *grpc.Server, srv NamespacesServer) {
s.RegisterService(&_Namespaces_serviceDesc, srv)
}
@@ -703,7 +726,7 @@ var _Namespaces_serviceDesc = grpc.ServiceDesc{
func (m *Namespace) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -711,43 +734,52 @@ func (m *Namespace) Marshal() (dAtA []byte, err error) {
}
func (m *Namespace) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Namespace) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Name) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name)))
- i += copy(dAtA[i:], m.Name)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Labels) > 0 {
- for k, _ := range m.Labels {
- dAtA[i] = 0x12
- i++
+ for k := range m.Labels {
v := m.Labels[k]
- mapSize := 1 + len(k) + sovNamespace(uint64(len(k))) + 1 + len(v) + sovNamespace(uint64(len(v)))
- i = encodeVarintNamespace(dAtA, i, uint64(mapSize))
- dAtA[i] = 0xa
- i++
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintNamespace(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(k)
+ copy(dAtA[i:], k)
i = encodeVarintNamespace(dAtA, i, uint64(len(k)))
- i += copy(dAtA[i:], k)
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintNamespace(dAtA, i, uint64(baseI-i))
+ i--
dAtA[i] = 0x12
- i++
- i = encodeVarintNamespace(dAtA, i, uint64(len(v)))
- i += copy(dAtA[i:], v)
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *GetNamespaceRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -755,26 +787,33 @@ func (m *GetNamespaceRequest) Marshal() (dAtA []byte, err error) {
}
func (m *GetNamespaceRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GetNamespaceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Name) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name)))
- i += copy(dAtA[i:], m.Name)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *GetNamespaceResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -782,28 +821,36 @@ func (m *GetNamespaceResponse) Marshal() (dAtA []byte, err error) {
}
func (m *GetNamespaceResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GetNamespaceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- dAtA[i] = 0xa
- i++
- i = encodeVarintNamespace(dAtA, i, uint64(m.Namespace.Size()))
- n1, err := m.Namespace.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n1
if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- return i, nil
+ {
+ size, err := m.Namespace.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintNamespace(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
func (m *ListNamespacesRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -811,26 +858,33 @@ func (m *ListNamespacesRequest) Marshal() (dAtA []byte, err error) {
}
func (m *ListNamespacesRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ListNamespacesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Filter) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.Filter)
+ copy(dAtA[i:], m.Filter)
i = encodeVarintNamespace(dAtA, i, uint64(len(m.Filter)))
- i += copy(dAtA[i:], m.Filter)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ListNamespacesResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -838,32 +892,40 @@ func (m *ListNamespacesResponse) Marshal() (dAtA []byte, err error) {
}
func (m *ListNamespacesResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ListNamespacesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Namespaces) > 0 {
- for _, msg := range m.Namespaces {
- dAtA[i] = 0xa
- i++
- i = encodeVarintNamespace(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Namespaces[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintNamespace(dAtA, i, uint64(size))
}
- i += n
+ i--
+ dAtA[i] = 0xa
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *CreateNamespaceRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -871,28 +933,36 @@ func (m *CreateNamespaceRequest) Marshal() (dAtA []byte, err error) {
}
func (m *CreateNamespaceRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CreateNamespaceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- dAtA[i] = 0xa
- i++
- i = encodeVarintNamespace(dAtA, i, uint64(m.Namespace.Size()))
- n2, err := m.Namespace.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n2
if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ {
+ size, err := m.Namespace.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintNamespace(dAtA, i, uint64(size))
}
- return i, nil
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
func (m *CreateNamespaceResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -900,28 +970,36 @@ func (m *CreateNamespaceResponse) Marshal() (dAtA []byte, err error) {
}
func (m *CreateNamespaceResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CreateNamespaceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- dAtA[i] = 0xa
- i++
- i = encodeVarintNamespace(dAtA, i, uint64(m.Namespace.Size()))
- n3, err := m.Namespace.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n3
if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- return i, nil
+ {
+ size, err := m.Namespace.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintNamespace(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
func (m *UpdateNamespaceRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -929,38 +1007,48 @@ func (m *UpdateNamespaceRequest) Marshal() (dAtA []byte, err error) {
}
func (m *UpdateNamespaceRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UpdateNamespaceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- dAtA[i] = 0xa
- i++
- i = encodeVarintNamespace(dAtA, i, uint64(m.Namespace.Size()))
- n4, err := m.Namespace.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- i += n4
if m.UpdateMask != nil {
+ {
+ size, err := m.UpdateMask.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintNamespace(dAtA, i, uint64(size))
+ }
+ i--
dAtA[i] = 0x12
- i++
- i = encodeVarintNamespace(dAtA, i, uint64(m.UpdateMask.Size()))
- n5, err := m.UpdateMask.MarshalTo(dAtA[i:])
+ }
+ {
+ size, err := m.Namespace.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
- i += n5
+ i -= size
+ i = encodeVarintNamespace(dAtA, i, uint64(size))
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
func (m *UpdateNamespaceResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -968,28 +1056,36 @@ func (m *UpdateNamespaceResponse) Marshal() (dAtA []byte, err error) {
}
func (m *UpdateNamespaceResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UpdateNamespaceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- dAtA[i] = 0xa
- i++
- i = encodeVarintNamespace(dAtA, i, uint64(m.Namespace.Size()))
- n6, err := m.Namespace.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n6
if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ {
+ size, err := m.Namespace.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintNamespace(dAtA, i, uint64(size))
}
- return i, nil
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
func (m *DeleteNamespaceRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -997,30 +1093,39 @@ func (m *DeleteNamespaceRequest) Marshal() (dAtA []byte, err error) {
}
func (m *DeleteNamespaceRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeleteNamespaceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Name) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name)))
- i += copy(dAtA[i:], m.Name)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func encodeVarintNamespace(dAtA []byte, offset int, v uint64) int {
+ offset -= sovNamespace(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func (m *Namespace) Size() (n int) {
if m == nil {
@@ -1187,14 +1292,7 @@ func (m *DeleteNamespaceRequest) Size() (n int) {
}
func sovNamespace(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozNamespace(x uint64) (n int) {
return sovNamespace(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -1258,8 +1356,13 @@ func (this *ListNamespacesResponse) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForNamespaces := "[]Namespace{"
+ for _, f := range this.Namespaces {
+ repeatedStringForNamespaces += strings.Replace(strings.Replace(f.String(), "Namespace", "Namespace", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForNamespaces += "}"
s := strings.Join([]string{`&ListNamespacesResponse{`,
- `Namespaces:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Namespaces), "Namespace", "Namespace", 1), `&`, ``, 1) + `,`,
+ `Namespaces:` + repeatedStringForNamespaces + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -2362,6 +2465,7 @@ func (m *DeleteNamespaceRequest) Unmarshal(dAtA []byte) error {
func skipNamespace(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -2393,10 +2497,8 @@ func skipNamespace(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -2417,55 +2519,30 @@ func skipNamespace(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthNamespace
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthNamespace
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowNamespace
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipNamespace(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthNamespace
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupNamespace
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthNamespace
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthNamespace = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowNamespace = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthNamespace = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowNamespace = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupNamespace = fmt.Errorf("proto: unexpected end of group")
)
diff --git a/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.pb.go b/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.pb.go
index 07c985624..1877afded 100644
--- a/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.pb.go
@@ -12,8 +12,11 @@ import (
github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
types1 "github.com/gogo/protobuf/types"
grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
time "time"
@@ -29,7 +32,7 @@ var _ = time.Kitchen
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type Kind int32
@@ -88,7 +91,7 @@ func (m *PrepareSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]by
return xxx_messageInfo_PrepareSnapshotRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -127,7 +130,7 @@ func (m *PrepareSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]b
return xxx_messageInfo_PrepareSnapshotResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -172,7 +175,7 @@ func (m *ViewSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte,
return xxx_messageInfo_ViewSnapshotRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -211,7 +214,7 @@ func (m *ViewSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte
return xxx_messageInfo_ViewSnapshotResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -251,7 +254,7 @@ func (m *MountsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error
return xxx_messageInfo_MountsRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -290,7 +293,7 @@ func (m *MountsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro
return xxx_messageInfo_MountsResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -330,7 +333,7 @@ func (m *RemoveSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byt
return xxx_messageInfo_RemoveSnapshotRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -375,7 +378,7 @@ func (m *CommitSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byt
return xxx_messageInfo_CommitSnapshotRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -415,7 +418,7 @@ func (m *StatSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte,
return xxx_messageInfo_StatSnapshotRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -464,7 +467,7 @@ func (m *Info) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Info.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -503,7 +506,7 @@ func (m *StatSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte
return xxx_messageInfo_StatSnapshotResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -550,7 +553,7 @@ func (m *UpdateSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byt
return xxx_messageInfo_UpdateSnapshotRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -589,7 +592,7 @@ func (m *UpdateSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]by
return xxx_messageInfo_UpdateSnapshotResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -639,7 +642,7 @@ func (m *ListSnapshotsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte
return xxx_messageInfo_ListSnapshotsRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -678,7 +681,7 @@ func (m *ListSnapshotsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byt
return xxx_messageInfo_ListSnapshotsResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -718,7 +721,7 @@ func (m *UsageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_UsageRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -758,7 +761,7 @@ func (m *UsageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error
return xxx_messageInfo_UsageResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -797,7 +800,7 @@ func (m *CleanupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro
return xxx_messageInfo_CleanupRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -1075,6 +1078,41 @@ type SnapshotsServer interface {
Cleanup(context.Context, *CleanupRequest) (*types1.Empty, error)
}
+// UnimplementedSnapshotsServer can be embedded to have forward compatible implementations.
+type UnimplementedSnapshotsServer struct {
+}
+
+func (*UnimplementedSnapshotsServer) Prepare(ctx context.Context, req *PrepareSnapshotRequest) (*PrepareSnapshotResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Prepare not implemented")
+}
+func (*UnimplementedSnapshotsServer) View(ctx context.Context, req *ViewSnapshotRequest) (*ViewSnapshotResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method View not implemented")
+}
+func (*UnimplementedSnapshotsServer) Mounts(ctx context.Context, req *MountsRequest) (*MountsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Mounts not implemented")
+}
+func (*UnimplementedSnapshotsServer) Commit(ctx context.Context, req *CommitSnapshotRequest) (*types1.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Commit not implemented")
+}
+func (*UnimplementedSnapshotsServer) Remove(ctx context.Context, req *RemoveSnapshotRequest) (*types1.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Remove not implemented")
+}
+func (*UnimplementedSnapshotsServer) Stat(ctx context.Context, req *StatSnapshotRequest) (*StatSnapshotResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Stat not implemented")
+}
+func (*UnimplementedSnapshotsServer) Update(ctx context.Context, req *UpdateSnapshotRequest) (*UpdateSnapshotResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Update not implemented")
+}
+func (*UnimplementedSnapshotsServer) List(req *ListSnapshotsRequest, srv Snapshots_ListServer) error {
+ return status.Errorf(codes.Unimplemented, "method List not implemented")
+}
+func (*UnimplementedSnapshotsServer) Usage(ctx context.Context, req *UsageRequest) (*UsageResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Usage not implemented")
+}
+func (*UnimplementedSnapshotsServer) Cleanup(ctx context.Context, req *CleanupRequest) (*types1.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Cleanup not implemented")
+}
+
func RegisterSnapshotsServer(s *grpc.Server, srv SnapshotsServer) {
s.RegisterService(&_Snapshots_serviceDesc, srv)
}
@@ -1316,7 +1354,7 @@ var _Snapshots_serviceDesc = grpc.ServiceDesc{
func (m *PrepareSnapshotRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1324,55 +1362,66 @@ func (m *PrepareSnapshotRequest) Marshal() (dAtA []byte, err error) {
}
func (m *PrepareSnapshotRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PrepareSnapshotRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Snapshotter) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter)))
- i += copy(dAtA[i:], m.Snapshotter)
- }
- if len(m.Key) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Key)))
- i += copy(dAtA[i:], m.Key)
- }
- if len(m.Parent) > 0 {
- dAtA[i] = 0x1a
- i++
- i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Parent)))
- i += copy(dAtA[i:], m.Parent)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Labels) > 0 {
- for k, _ := range m.Labels {
- dAtA[i] = 0x22
- i++
+ for k := range m.Labels {
v := m.Labels[k]
- mapSize := 1 + len(k) + sovSnapshots(uint64(len(k))) + 1 + len(v) + sovSnapshots(uint64(len(v)))
- i = encodeVarintSnapshots(dAtA, i, uint64(mapSize))
- dAtA[i] = 0xa
- i++
- i = encodeVarintSnapshots(dAtA, i, uint64(len(k)))
- i += copy(dAtA[i:], k)
- dAtA[i] = 0x12
- i++
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
i = encodeVarintSnapshots(dAtA, i, uint64(len(v)))
- i += copy(dAtA[i:], v)
+ i--
+ dAtA[i] = 0x12
+ i -= len(k)
+ copy(dAtA[i:], k)
+ i = encodeVarintSnapshots(dAtA, i, uint64(len(k)))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintSnapshots(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x22
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Parent) > 0 {
+ i -= len(m.Parent)
+ copy(dAtA[i:], m.Parent)
+ i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Parent)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Key) > 0 {
+ i -= len(m.Key)
+ copy(dAtA[i:], m.Key)
+ i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Key)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Snapshotter) > 0 {
+ i -= len(m.Snapshotter)
+ copy(dAtA[i:], m.Snapshotter)
+ i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *PrepareSnapshotResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1380,32 +1429,40 @@ func (m *PrepareSnapshotResponse) Marshal() (dAtA []byte, err error) {
}
func (m *PrepareSnapshotResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PrepareSnapshotResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Mounts) > 0 {
- for _, msg := range m.Mounts {
- dAtA[i] = 0xa
- i++
- i = encodeVarintSnapshots(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ for iNdEx := len(m.Mounts) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Mounts[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintSnapshots(dAtA, i, uint64(size))
}
- i += n
+ i--
+ dAtA[i] = 0xa
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ViewSnapshotRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1413,55 +1470,66 @@ func (m *ViewSnapshotRequest) Marshal() (dAtA []byte, err error) {
}
func (m *ViewSnapshotRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ViewSnapshotRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Snapshotter) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter)))
- i += copy(dAtA[i:], m.Snapshotter)
- }
- if len(m.Key) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Key)))
- i += copy(dAtA[i:], m.Key)
- }
- if len(m.Parent) > 0 {
- dAtA[i] = 0x1a
- i++
- i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Parent)))
- i += copy(dAtA[i:], m.Parent)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Labels) > 0 {
- for k, _ := range m.Labels {
- dAtA[i] = 0x22
- i++
+ for k := range m.Labels {
v := m.Labels[k]
- mapSize := 1 + len(k) + sovSnapshots(uint64(len(k))) + 1 + len(v) + sovSnapshots(uint64(len(v)))
- i = encodeVarintSnapshots(dAtA, i, uint64(mapSize))
- dAtA[i] = 0xa
- i++
- i = encodeVarintSnapshots(dAtA, i, uint64(len(k)))
- i += copy(dAtA[i:], k)
- dAtA[i] = 0x12
- i++
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
i = encodeVarintSnapshots(dAtA, i, uint64(len(v)))
- i += copy(dAtA[i:], v)
+ i--
+ dAtA[i] = 0x12
+ i -= len(k)
+ copy(dAtA[i:], k)
+ i = encodeVarintSnapshots(dAtA, i, uint64(len(k)))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintSnapshots(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x22
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Parent) > 0 {
+ i -= len(m.Parent)
+ copy(dAtA[i:], m.Parent)
+ i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Parent)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Key) > 0 {
+ i -= len(m.Key)
+ copy(dAtA[i:], m.Key)
+ i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Key)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Snapshotter) > 0 {
+ i -= len(m.Snapshotter)
+ copy(dAtA[i:], m.Snapshotter)
+ i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ViewSnapshotResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1469,32 +1537,40 @@ func (m *ViewSnapshotResponse) Marshal() (dAtA []byte, err error) {
}
func (m *ViewSnapshotResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ViewSnapshotResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Mounts) > 0 {
- for _, msg := range m.Mounts {
- dAtA[i] = 0xa
- i++
- i = encodeVarintSnapshots(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ for iNdEx := len(m.Mounts) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Mounts[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintSnapshots(dAtA, i, uint64(size))
}
- i += n
+ i--
+ dAtA[i] = 0xa
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *MountsRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1502,32 +1578,40 @@ func (m *MountsRequest) Marshal() (dAtA []byte, err error) {
}
func (m *MountsRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MountsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Snapshotter) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter)))
- i += copy(dAtA[i:], m.Snapshotter)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Key) > 0 {
- dAtA[i] = 0x12
- i++
+ i -= len(m.Key)
+ copy(dAtA[i:], m.Key)
i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Key)))
- i += copy(dAtA[i:], m.Key)
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Snapshotter) > 0 {
+ i -= len(m.Snapshotter)
+ copy(dAtA[i:], m.Snapshotter)
+ i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *MountsResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1535,32 +1619,40 @@ func (m *MountsResponse) Marshal() (dAtA []byte, err error) {
}
func (m *MountsResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MountsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Mounts) > 0 {
- for _, msg := range m.Mounts {
- dAtA[i] = 0xa
- i++
- i = encodeVarintSnapshots(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ for iNdEx := len(m.Mounts) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Mounts[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintSnapshots(dAtA, i, uint64(size))
}
- i += n
+ i--
+ dAtA[i] = 0xa
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *RemoveSnapshotRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1568,32 +1660,40 @@ func (m *RemoveSnapshotRequest) Marshal() (dAtA []byte, err error) {
}
func (m *RemoveSnapshotRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RemoveSnapshotRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Snapshotter) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter)))
- i += copy(dAtA[i:], m.Snapshotter)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Key) > 0 {
- dAtA[i] = 0x12
- i++
+ i -= len(m.Key)
+ copy(dAtA[i:], m.Key)
i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Key)))
- i += copy(dAtA[i:], m.Key)
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Snapshotter) > 0 {
+ i -= len(m.Snapshotter)
+ copy(dAtA[i:], m.Snapshotter)
+ i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *CommitSnapshotRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1601,55 +1701,66 @@ func (m *CommitSnapshotRequest) Marshal() (dAtA []byte, err error) {
}
func (m *CommitSnapshotRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CommitSnapshotRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Snapshotter) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter)))
- i += copy(dAtA[i:], m.Snapshotter)
- }
- if len(m.Name) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Name)))
- i += copy(dAtA[i:], m.Name)
- }
- if len(m.Key) > 0 {
- dAtA[i] = 0x1a
- i++
- i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Key)))
- i += copy(dAtA[i:], m.Key)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Labels) > 0 {
- for k, _ := range m.Labels {
- dAtA[i] = 0x22
- i++
+ for k := range m.Labels {
v := m.Labels[k]
- mapSize := 1 + len(k) + sovSnapshots(uint64(len(k))) + 1 + len(v) + sovSnapshots(uint64(len(v)))
- i = encodeVarintSnapshots(dAtA, i, uint64(mapSize))
- dAtA[i] = 0xa
- i++
- i = encodeVarintSnapshots(dAtA, i, uint64(len(k)))
- i += copy(dAtA[i:], k)
- dAtA[i] = 0x12
- i++
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
i = encodeVarintSnapshots(dAtA, i, uint64(len(v)))
- i += copy(dAtA[i:], v)
+ i--
+ dAtA[i] = 0x12
+ i -= len(k)
+ copy(dAtA[i:], k)
+ i = encodeVarintSnapshots(dAtA, i, uint64(len(k)))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintSnapshots(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x22
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Key) > 0 {
+ i -= len(m.Key)
+ copy(dAtA[i:], m.Key)
+ i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Key)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Snapshotter) > 0 {
+ i -= len(m.Snapshotter)
+ copy(dAtA[i:], m.Snapshotter)
+ i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *StatSnapshotRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1657,32 +1768,40 @@ func (m *StatSnapshotRequest) Marshal() (dAtA []byte, err error) {
}
func (m *StatSnapshotRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StatSnapshotRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Snapshotter) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter)))
- i += copy(dAtA[i:], m.Snapshotter)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Key) > 0 {
- dAtA[i] = 0x12
- i++
+ i -= len(m.Key)
+ copy(dAtA[i:], m.Key)
i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Key)))
- i += copy(dAtA[i:], m.Key)
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Snapshotter) > 0 {
+ i -= len(m.Snapshotter)
+ copy(dAtA[i:], m.Snapshotter)
+ i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *Info) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1690,70 +1809,80 @@ func (m *Info) Marshal() (dAtA []byte, err error) {
}
func (m *Info) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Info) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Name) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Name)))
- i += copy(dAtA[i:], m.Name)
- }
- if len(m.Parent) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Parent)))
- i += copy(dAtA[i:], m.Parent)
- }
- if m.Kind != 0 {
- dAtA[i] = 0x18
- i++
- i = encodeVarintSnapshots(dAtA, i, uint64(m.Kind))
- }
- dAtA[i] = 0x22
- i++
- i = encodeVarintSnapshots(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt)))
- n1, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n1
- dAtA[i] = 0x2a
- i++
- i = encodeVarintSnapshots(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)))
- n2, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i:])
- if err != nil {
- return 0, err
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- i += n2
if len(m.Labels) > 0 {
- for k, _ := range m.Labels {
- dAtA[i] = 0x32
- i++
+ for k := range m.Labels {
v := m.Labels[k]
- mapSize := 1 + len(k) + sovSnapshots(uint64(len(k))) + 1 + len(v) + sovSnapshots(uint64(len(v)))
- i = encodeVarintSnapshots(dAtA, i, uint64(mapSize))
- dAtA[i] = 0xa
- i++
- i = encodeVarintSnapshots(dAtA, i, uint64(len(k)))
- i += copy(dAtA[i:], k)
- dAtA[i] = 0x12
- i++
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
i = encodeVarintSnapshots(dAtA, i, uint64(len(v)))
- i += copy(dAtA[i:], v)
+ i--
+ dAtA[i] = 0x12
+ i -= len(k)
+ copy(dAtA[i:], k)
+ i = encodeVarintSnapshots(dAtA, i, uint64(len(k)))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintSnapshots(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x32
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt):])
+ if err1 != nil {
+ return 0, err1
+ }
+ i -= n1
+ i = encodeVarintSnapshots(dAtA, i, uint64(n1))
+ i--
+ dAtA[i] = 0x2a
+ n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt):])
+ if err2 != nil {
+ return 0, err2
+ }
+ i -= n2
+ i = encodeVarintSnapshots(dAtA, i, uint64(n2))
+ i--
+ dAtA[i] = 0x22
+ if m.Kind != 0 {
+ i = encodeVarintSnapshots(dAtA, i, uint64(m.Kind))
+ i--
+ dAtA[i] = 0x18
}
- return i, nil
+ if len(m.Parent) > 0 {
+ i -= len(m.Parent)
+ copy(dAtA[i:], m.Parent)
+ i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Parent)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
}
func (m *StatSnapshotResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1761,28 +1890,36 @@ func (m *StatSnapshotResponse) Marshal() (dAtA []byte, err error) {
}
func (m *StatSnapshotResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StatSnapshotResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- dAtA[i] = 0xa
- i++
- i = encodeVarintSnapshots(dAtA, i, uint64(m.Info.Size()))
- n3, err := m.Info.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n3
if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ {
+ size, err := m.Info.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintSnapshots(dAtA, i, uint64(size))
}
- return i, nil
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
func (m *UpdateSnapshotRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1790,44 +1927,55 @@ func (m *UpdateSnapshotRequest) Marshal() (dAtA []byte, err error) {
}
func (m *UpdateSnapshotRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UpdateSnapshotRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Snapshotter) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter)))
- i += copy(dAtA[i:], m.Snapshotter)
- }
- dAtA[i] = 0x12
- i++
- i = encodeVarintSnapshots(dAtA, i, uint64(m.Info.Size()))
- n4, err := m.Info.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- i += n4
if m.UpdateMask != nil {
+ {
+ size, err := m.UpdateMask.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintSnapshots(dAtA, i, uint64(size))
+ }
+ i--
dAtA[i] = 0x1a
- i++
- i = encodeVarintSnapshots(dAtA, i, uint64(m.UpdateMask.Size()))
- n5, err := m.UpdateMask.MarshalTo(dAtA[i:])
+ }
+ {
+ size, err := m.Info.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
- i += n5
+ i -= size
+ i = encodeVarintSnapshots(dAtA, i, uint64(size))
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0x12
+ if len(m.Snapshotter) > 0 {
+ i -= len(m.Snapshotter)
+ copy(dAtA[i:], m.Snapshotter)
+ i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *UpdateSnapshotResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1835,28 +1983,36 @@ func (m *UpdateSnapshotResponse) Marshal() (dAtA []byte, err error) {
}
func (m *UpdateSnapshotResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UpdateSnapshotResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- dAtA[i] = 0xa
- i++
- i = encodeVarintSnapshots(dAtA, i, uint64(m.Info.Size()))
- n6, err := m.Info.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n6
if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ {
+ size, err := m.Info.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintSnapshots(dAtA, i, uint64(size))
}
- return i, nil
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
func (m *ListSnapshotsRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1864,41 +2020,42 @@ func (m *ListSnapshotsRequest) Marshal() (dAtA []byte, err error) {
}
func (m *ListSnapshotsRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ListSnapshotsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Snapshotter) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter)))
- i += copy(dAtA[i:], m.Snapshotter)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Filters) > 0 {
- for _, s := range m.Filters {
+ for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Filters[iNdEx])
+ copy(dAtA[i:], m.Filters[iNdEx])
+ i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Filters[iNdEx])))
+ i--
dAtA[i] = 0x12
- i++
- l = len(s)
- for l >= 1<<7 {
- dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
- l >>= 7
- i++
- }
- dAtA[i] = uint8(l)
- i++
- i += copy(dAtA[i:], s)
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Snapshotter) > 0 {
+ i -= len(m.Snapshotter)
+ copy(dAtA[i:], m.Snapshotter)
+ i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ListSnapshotsResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1906,32 +2063,40 @@ func (m *ListSnapshotsResponse) Marshal() (dAtA []byte, err error) {
}
func (m *ListSnapshotsResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ListSnapshotsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Info) > 0 {
- for _, msg := range m.Info {
- dAtA[i] = 0xa
- i++
- i = encodeVarintSnapshots(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ for iNdEx := len(m.Info) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Info[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintSnapshots(dAtA, i, uint64(size))
}
- i += n
+ i--
+ dAtA[i] = 0xa
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *UsageRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1939,32 +2104,40 @@ func (m *UsageRequest) Marshal() (dAtA []byte, err error) {
}
func (m *UsageRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UsageRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Snapshotter) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter)))
- i += copy(dAtA[i:], m.Snapshotter)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Key) > 0 {
- dAtA[i] = 0x12
- i++
+ i -= len(m.Key)
+ copy(dAtA[i:], m.Key)
i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Key)))
- i += copy(dAtA[i:], m.Key)
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Snapshotter) > 0 {
+ i -= len(m.Snapshotter)
+ copy(dAtA[i:], m.Snapshotter)
+ i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *UsageResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1972,30 +2145,36 @@ func (m *UsageResponse) Marshal() (dAtA []byte, err error) {
}
func (m *UsageResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UsageResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if m.Size_ != 0 {
- dAtA[i] = 0x8
- i++
- i = encodeVarintSnapshots(dAtA, i, uint64(m.Size_))
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if m.Inodes != 0 {
- dAtA[i] = 0x10
- i++
i = encodeVarintSnapshots(dAtA, i, uint64(m.Inodes))
+ i--
+ dAtA[i] = 0x10
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if m.Size_ != 0 {
+ i = encodeVarintSnapshots(dAtA, i, uint64(m.Size_))
+ i--
+ dAtA[i] = 0x8
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *CleanupRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2003,30 +2182,39 @@ func (m *CleanupRequest) Marshal() (dAtA []byte, err error) {
}
func (m *CleanupRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CleanupRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Snapshotter) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.Snapshotter)
+ copy(dAtA[i:], m.Snapshotter)
i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter)))
- i += copy(dAtA[i:], m.Snapshotter)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func encodeVarintSnapshots(dAtA []byte, offset int, v uint64) int {
+ offset -= sovSnapshots(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func (m *PrepareSnapshotRequest) Size() (n int) {
if m == nil {
@@ -2418,14 +2606,7 @@ func (m *CleanupRequest) Size() (n int) {
}
func sovSnapshots(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozSnapshots(x uint64) (n int) {
return sovSnapshots(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -2458,8 +2639,13 @@ func (this *PrepareSnapshotResponse) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForMounts := "[]*Mount{"
+ for _, f := range this.Mounts {
+ repeatedStringForMounts += strings.Replace(fmt.Sprintf("%v", f), "Mount", "types.Mount", 1) + ","
+ }
+ repeatedStringForMounts += "}"
s := strings.Join([]string{`&PrepareSnapshotResponse{`,
- `Mounts:` + strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "types.Mount", 1) + `,`,
+ `Mounts:` + repeatedStringForMounts + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -2493,8 +2679,13 @@ func (this *ViewSnapshotResponse) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForMounts := "[]*Mount{"
+ for _, f := range this.Mounts {
+ repeatedStringForMounts += strings.Replace(fmt.Sprintf("%v", f), "Mount", "types.Mount", 1) + ","
+ }
+ repeatedStringForMounts += "}"
s := strings.Join([]string{`&ViewSnapshotResponse{`,
- `Mounts:` + strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "types.Mount", 1) + `,`,
+ `Mounts:` + repeatedStringForMounts + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -2516,8 +2707,13 @@ func (this *MountsResponse) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForMounts := "[]*Mount{"
+ for _, f := range this.Mounts {
+ repeatedStringForMounts += strings.Replace(fmt.Sprintf("%v", f), "Mount", "types.Mount", 1) + ","
+ }
+ repeatedStringForMounts += "}"
s := strings.Join([]string{`&MountsResponse{`,
- `Mounts:` + strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "types.Mount", 1) + `,`,
+ `Mounts:` + repeatedStringForMounts + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -2589,8 +2785,8 @@ func (this *Info) String() string {
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
`Parent:` + fmt.Sprintf("%v", this.Parent) + `,`,
`Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
- `CreatedAt:` + strings.Replace(strings.Replace(this.CreatedAt.String(), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
- `UpdatedAt:` + strings.Replace(strings.Replace(this.UpdatedAt.String(), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
+ `CreatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CreatedAt), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
+ `UpdatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UpdatedAt), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
`Labels:` + mapStringForLabels + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
@@ -2648,8 +2844,13 @@ func (this *ListSnapshotsResponse) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForInfo := "[]Info{"
+ for _, f := range this.Info {
+ repeatedStringForInfo += strings.Replace(strings.Replace(f.String(), "Info", "Info", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForInfo += "}"
s := strings.Join([]string{`&ListSnapshotsResponse{`,
- `Info:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Info), "Info", "Info", 1), `&`, ``, 1) + `,`,
+ `Info:` + repeatedStringForInfo + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -5311,6 +5512,7 @@ func (m *CleanupRequest) Unmarshal(dAtA []byte) error {
func skipSnapshots(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -5342,10 +5544,8 @@ func skipSnapshots(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -5366,55 +5566,30 @@ func skipSnapshots(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthSnapshots
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthSnapshots
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowSnapshots
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipSnapshots(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthSnapshots
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupSnapshots
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthSnapshots
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthSnapshots = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowSnapshots = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthSnapshots = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowSnapshots = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupSnapshots = fmt.Errorf("proto: unexpected end of group")
)
diff --git a/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go b/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go
index cadaf745f..5ac5af11b 100644
--- a/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go
@@ -13,8 +13,11 @@ import (
types1 "github.com/gogo/protobuf/types"
github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
time "time"
@@ -30,7 +33,7 @@ var _ = time.Kitchen
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type CreateTaskRequest struct {
ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
@@ -65,7 +68,7 @@ func (m *CreateTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, e
return xxx_messageInfo_CreateTaskRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -105,7 +108,7 @@ func (m *CreateTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte,
return xxx_messageInfo_CreateTaskResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -145,7 +148,7 @@ func (m *StartRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_StartRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -184,7 +187,7 @@ func (m *StartResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error
return xxx_messageInfo_StartResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -223,7 +226,7 @@ func (m *DeleteTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, e
return xxx_messageInfo_DeleteTaskRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -265,7 +268,7 @@ func (m *DeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro
return xxx_messageInfo_DeleteResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -305,7 +308,7 @@ func (m *DeleteProcessRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte
return xxx_messageInfo_DeleteProcessRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -345,7 +348,7 @@ func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -384,7 +387,7 @@ func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -423,7 +426,7 @@ func (m *ListTasksRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, er
return xxx_messageInfo_ListTasksRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -462,7 +465,7 @@ func (m *ListTasksResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, e
return xxx_messageInfo_ListTasksResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -504,7 +507,7 @@ func (m *KillRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_KillRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -553,7 +556,7 @@ func (m *ExecProcessRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte,
return xxx_messageInfo_ExecProcessRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -591,7 +594,7 @@ func (m *ExecProcessResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte,
return xxx_messageInfo_ExecProcessResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -633,7 +636,7 @@ func (m *ResizePtyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, er
return xxx_messageInfo_ResizePtyRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -674,7 +677,7 @@ func (m *CloseIORequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro
return xxx_messageInfo_CloseIORequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -713,7 +716,7 @@ func (m *PauseTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, er
return xxx_messageInfo_PauseTaskRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -752,7 +755,7 @@ func (m *ResumeTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, e
return xxx_messageInfo_ResumeTaskRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -791,7 +794,7 @@ func (m *ListPidsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, err
return xxx_messageInfo_ListPidsRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -831,7 +834,7 @@ func (m *ListPidsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, er
return xxx_messageInfo_ListPidsResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -872,7 +875,7 @@ func (m *CheckpointTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byt
return xxx_messageInfo_CheckpointTaskRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -911,7 +914,7 @@ func (m *CheckpointTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]by
return xxx_messageInfo_CheckpointTaskResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -951,7 +954,7 @@ func (m *UpdateTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, e
return xxx_messageInfo_UpdateTaskRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -990,7 +993,7 @@ func (m *MetricsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro
return xxx_messageInfo_MetricsRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -1029,7 +1032,7 @@ func (m *MetricsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, err
return xxx_messageInfo_MetricsResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -1069,7 +1072,7 @@ func (m *WaitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_WaitRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -1109,7 +1112,7 @@ func (m *WaitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_WaitResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -1470,6 +1473,62 @@ type TasksServer interface {
Wait(context.Context, *WaitRequest) (*WaitResponse, error)
}
+// UnimplementedTasksServer can be embedded to have forward compatible implementations.
+type UnimplementedTasksServer struct {
+}
+
+func (*UnimplementedTasksServer) Create(ctx context.Context, req *CreateTaskRequest) (*CreateTaskResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Create not implemented")
+}
+func (*UnimplementedTasksServer) Start(ctx context.Context, req *StartRequest) (*StartResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Start not implemented")
+}
+func (*UnimplementedTasksServer) Delete(ctx context.Context, req *DeleteTaskRequest) (*DeleteResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented")
+}
+func (*UnimplementedTasksServer) DeleteProcess(ctx context.Context, req *DeleteProcessRequest) (*DeleteResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DeleteProcess not implemented")
+}
+func (*UnimplementedTasksServer) Get(ctx context.Context, req *GetRequest) (*GetResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Get not implemented")
+}
+func (*UnimplementedTasksServer) List(ctx context.Context, req *ListTasksRequest) (*ListTasksResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method List not implemented")
+}
+func (*UnimplementedTasksServer) Kill(ctx context.Context, req *KillRequest) (*types1.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Kill not implemented")
+}
+func (*UnimplementedTasksServer) Exec(ctx context.Context, req *ExecProcessRequest) (*types1.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Exec not implemented")
+}
+func (*UnimplementedTasksServer) ResizePty(ctx context.Context, req *ResizePtyRequest) (*types1.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ResizePty not implemented")
+}
+func (*UnimplementedTasksServer) CloseIO(ctx context.Context, req *CloseIORequest) (*types1.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CloseIO not implemented")
+}
+func (*UnimplementedTasksServer) Pause(ctx context.Context, req *PauseTaskRequest) (*types1.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Pause not implemented")
+}
+func (*UnimplementedTasksServer) Resume(ctx context.Context, req *ResumeTaskRequest) (*types1.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Resume not implemented")
+}
+func (*UnimplementedTasksServer) ListPids(ctx context.Context, req *ListPidsRequest) (*ListPidsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListPids not implemented")
+}
+func (*UnimplementedTasksServer) Checkpoint(ctx context.Context, req *CheckpointTaskRequest) (*CheckpointTaskResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Checkpoint not implemented")
+}
+func (*UnimplementedTasksServer) Update(ctx context.Context, req *UpdateTaskRequest) (*types1.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Update not implemented")
+}
+func (*UnimplementedTasksServer) Metrics(ctx context.Context, req *MetricsRequest) (*MetricsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Metrics not implemented")
+}
+func (*UnimplementedTasksServer) Wait(ctx context.Context, req *WaitRequest) (*WaitResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Wait not implemented")
+}
+
func RegisterTasksServer(s *grpc.Server, srv TasksServer) {
s.RegisterService(&_Tasks_serviceDesc, srv)
}
@@ -1860,7 +1919,7 @@ var _Tasks_serviceDesc = grpc.ServiceDesc{
func (m *CreateTaskRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1868,86 +1927,102 @@ func (m *CreateTaskRequest) Marshal() (dAtA []byte, err error) {
}
func (m *CreateTaskRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CreateTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ContainerID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
- i += copy(dAtA[i:], m.ContainerID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- if len(m.Rootfs) > 0 {
- for _, msg := range m.Rootfs {
- dAtA[i] = 0x1a
- i++
- i = encodeVarintTasks(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
+ if m.Options != nil {
+ {
+ size, err := m.Options.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
- i += n
+ i -= size
+ i = encodeVarintTasks(dAtA, i, uint64(size))
}
+ i--
+ dAtA[i] = 0x4a
}
- if len(m.Stdin) > 0 {
- dAtA[i] = 0x22
- i++
- i = encodeVarintTasks(dAtA, i, uint64(len(m.Stdin)))
- i += copy(dAtA[i:], m.Stdin)
- }
- if len(m.Stdout) > 0 {
- dAtA[i] = 0x2a
- i++
- i = encodeVarintTasks(dAtA, i, uint64(len(m.Stdout)))
- i += copy(dAtA[i:], m.Stdout)
- }
- if len(m.Stderr) > 0 {
- dAtA[i] = 0x32
- i++
- i = encodeVarintTasks(dAtA, i, uint64(len(m.Stderr)))
- i += copy(dAtA[i:], m.Stderr)
+ if m.Checkpoint != nil {
+ {
+ size, err := m.Checkpoint.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTasks(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x42
}
if m.Terminal {
- dAtA[i] = 0x38
- i++
+ i--
if m.Terminal {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
+ i--
+ dAtA[i] = 0x38
}
- if m.Checkpoint != nil {
- dAtA[i] = 0x42
- i++
- i = encodeVarintTasks(dAtA, i, uint64(m.Checkpoint.Size()))
- n1, err := m.Checkpoint.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n1
+ if len(m.Stderr) > 0 {
+ i -= len(m.Stderr)
+ copy(dAtA[i:], m.Stderr)
+ i = encodeVarintTasks(dAtA, i, uint64(len(m.Stderr)))
+ i--
+ dAtA[i] = 0x32
}
- if m.Options != nil {
- dAtA[i] = 0x4a
- i++
- i = encodeVarintTasks(dAtA, i, uint64(m.Options.Size()))
- n2, err := m.Options.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ if len(m.Stdout) > 0 {
+ i -= len(m.Stdout)
+ copy(dAtA[i:], m.Stdout)
+ i = encodeVarintTasks(dAtA, i, uint64(len(m.Stdout)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if len(m.Stdin) > 0 {
+ i -= len(m.Stdin)
+ copy(dAtA[i:], m.Stdin)
+ i = encodeVarintTasks(dAtA, i, uint64(len(m.Stdin)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.Rootfs) > 0 {
+ for iNdEx := len(m.Rootfs) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Rootfs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTasks(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
}
- i += n2
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ContainerID) > 0 {
+ i -= len(m.ContainerID)
+ copy(dAtA[i:], m.ContainerID)
+ i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *CreateTaskResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1955,31 +2030,38 @@ func (m *CreateTaskResponse) Marshal() (dAtA []byte, err error) {
}
func (m *CreateTaskResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CreateTaskResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ContainerID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
- i += copy(dAtA[i:], m.ContainerID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if m.Pid != 0 {
- dAtA[i] = 0x10
- i++
i = encodeVarintTasks(dAtA, i, uint64(m.Pid))
+ i--
+ dAtA[i] = 0x10
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ContainerID) > 0 {
+ i -= len(m.ContainerID)
+ copy(dAtA[i:], m.ContainerID)
+ i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *StartRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1987,32 +2069,40 @@ func (m *StartRequest) Marshal() (dAtA []byte, err error) {
}
func (m *StartRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StartRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ContainerID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
- i += copy(dAtA[i:], m.ContainerID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.ExecID) > 0 {
- dAtA[i] = 0x12
- i++
+ i -= len(m.ExecID)
+ copy(dAtA[i:], m.ExecID)
i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID)))
- i += copy(dAtA[i:], m.ExecID)
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ContainerID) > 0 {
+ i -= len(m.ContainerID)
+ copy(dAtA[i:], m.ContainerID)
+ i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *StartResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2020,25 +2110,31 @@ func (m *StartResponse) Marshal() (dAtA []byte, err error) {
}
func (m *StartResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StartResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if m.Pid != 0 {
- dAtA[i] = 0x8
- i++
i = encodeVarintTasks(dAtA, i, uint64(m.Pid))
+ i--
+ dAtA[i] = 0x8
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *DeleteTaskRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2046,26 +2142,33 @@ func (m *DeleteTaskRequest) Marshal() (dAtA []byte, err error) {
}
func (m *DeleteTaskRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeleteTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.ContainerID) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.ContainerID)
+ copy(dAtA[i:], m.ContainerID)
i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
- i += copy(dAtA[i:], m.ContainerID)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *DeleteResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2073,44 +2176,51 @@ func (m *DeleteResponse) Marshal() (dAtA []byte, err error) {
}
func (m *DeleteResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeleteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintTasks(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- if m.Pid != 0 {
- dAtA[i] = 0x10
- i++
- i = encodeVarintTasks(dAtA, i, uint64(m.Pid))
+ n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt):])
+ if err3 != nil {
+ return 0, err3
}
+ i -= n3
+ i = encodeVarintTasks(dAtA, i, uint64(n3))
+ i--
+ dAtA[i] = 0x22
if m.ExitStatus != 0 {
- dAtA[i] = 0x18
- i++
i = encodeVarintTasks(dAtA, i, uint64(m.ExitStatus))
+ i--
+ dAtA[i] = 0x18
}
- dAtA[i] = 0x22
- i++
- i = encodeVarintTasks(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)))
- n3, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
- if err != nil {
- return 0, err
+ if m.Pid != 0 {
+ i = encodeVarintTasks(dAtA, i, uint64(m.Pid))
+ i--
+ dAtA[i] = 0x10
}
- i += n3
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ID) > 0 {
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintTasks(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *DeleteProcessRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2118,32 +2228,40 @@ func (m *DeleteProcessRequest) Marshal() (dAtA []byte, err error) {
}
func (m *DeleteProcessRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeleteProcessRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ContainerID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
- i += copy(dAtA[i:], m.ContainerID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.ExecID) > 0 {
- dAtA[i] = 0x12
- i++
+ i -= len(m.ExecID)
+ copy(dAtA[i:], m.ExecID)
i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID)))
- i += copy(dAtA[i:], m.ExecID)
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ContainerID) > 0 {
+ i -= len(m.ContainerID)
+ copy(dAtA[i:], m.ContainerID)
+ i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *GetRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2151,32 +2269,40 @@ func (m *GetRequest) Marshal() (dAtA []byte, err error) {
}
func (m *GetRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ContainerID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
- i += copy(dAtA[i:], m.ContainerID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.ExecID) > 0 {
- dAtA[i] = 0x12
- i++
+ i -= len(m.ExecID)
+ copy(dAtA[i:], m.ExecID)
i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID)))
- i += copy(dAtA[i:], m.ExecID)
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ContainerID) > 0 {
+ i -= len(m.ContainerID)
+ copy(dAtA[i:], m.ContainerID)
+ i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *GetResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2184,30 +2310,38 @@ func (m *GetResponse) Marshal() (dAtA []byte, err error) {
}
func (m *GetResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GetResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if m.Process != nil {
- dAtA[i] = 0xa
- i++
- i = encodeVarintTasks(dAtA, i, uint64(m.Process.Size()))
- n4, err := m.Process.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ {
+ size, err := m.Process.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTasks(dAtA, i, uint64(size))
}
- i += n4
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ListTasksRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2215,26 +2349,33 @@ func (m *ListTasksRequest) Marshal() (dAtA []byte, err error) {
}
func (m *ListTasksRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ListTasksRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Filter) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.Filter)
+ copy(dAtA[i:], m.Filter)
i = encodeVarintTasks(dAtA, i, uint64(len(m.Filter)))
- i += copy(dAtA[i:], m.Filter)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ListTasksResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2242,32 +2383,40 @@ func (m *ListTasksResponse) Marshal() (dAtA []byte, err error) {
}
func (m *ListTasksResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ListTasksResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Tasks) > 0 {
- for _, msg := range m.Tasks {
- dAtA[i] = 0xa
- i++
- i = encodeVarintTasks(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ for iNdEx := len(m.Tasks) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Tasks[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTasks(dAtA, i, uint64(size))
}
- i += n
+ i--
+ dAtA[i] = 0xa
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *KillRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2275,47 +2424,55 @@ func (m *KillRequest) Marshal() (dAtA []byte, err error) {
}
func (m *KillRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KillRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ContainerID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
- i += copy(dAtA[i:], m.ContainerID)
- }
- if len(m.ExecID) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID)))
- i += copy(dAtA[i:], m.ExecID)
- }
- if m.Signal != 0 {
- dAtA[i] = 0x18
- i++
- i = encodeVarintTasks(dAtA, i, uint64(m.Signal))
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if m.All {
- dAtA[i] = 0x20
- i++
+ i--
if m.All {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
+ i--
+ dAtA[i] = 0x20
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if m.Signal != 0 {
+ i = encodeVarintTasks(dAtA, i, uint64(m.Signal))
+ i--
+ dAtA[i] = 0x18
+ }
+ if len(m.ExecID) > 0 {
+ i -= len(m.ExecID)
+ copy(dAtA[i:], m.ExecID)
+ i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID)))
+ i--
+ dAtA[i] = 0x12
}
- return i, nil
+ if len(m.ContainerID) > 0 {
+ i -= len(m.ContainerID)
+ copy(dAtA[i:], m.ContainerID)
+ i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
}
func (m *ExecProcessRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2323,70 +2480,83 @@ func (m *ExecProcessRequest) Marshal() (dAtA []byte, err error) {
}
func (m *ExecProcessRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ExecProcessRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ContainerID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
- i += copy(dAtA[i:], m.ContainerID)
- }
- if len(m.Stdin) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintTasks(dAtA, i, uint64(len(m.Stdin)))
- i += copy(dAtA[i:], m.Stdin)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- if len(m.Stdout) > 0 {
- dAtA[i] = 0x1a
- i++
- i = encodeVarintTasks(dAtA, i, uint64(len(m.Stdout)))
- i += copy(dAtA[i:], m.Stdout)
+ if len(m.ExecID) > 0 {
+ i -= len(m.ExecID)
+ copy(dAtA[i:], m.ExecID)
+ i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID)))
+ i--
+ dAtA[i] = 0x3a
}
- if len(m.Stderr) > 0 {
- dAtA[i] = 0x22
- i++
- i = encodeVarintTasks(dAtA, i, uint64(len(m.Stderr)))
- i += copy(dAtA[i:], m.Stderr)
+ if m.Spec != nil {
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTasks(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
}
if m.Terminal {
- dAtA[i] = 0x28
- i++
+ i--
if m.Terminal {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
+ i--
+ dAtA[i] = 0x28
}
- if m.Spec != nil {
- dAtA[i] = 0x32
- i++
- i = encodeVarintTasks(dAtA, i, uint64(m.Spec.Size()))
- n5, err := m.Spec.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n5
+ if len(m.Stderr) > 0 {
+ i -= len(m.Stderr)
+ copy(dAtA[i:], m.Stderr)
+ i = encodeVarintTasks(dAtA, i, uint64(len(m.Stderr)))
+ i--
+ dAtA[i] = 0x22
}
- if len(m.ExecID) > 0 {
- dAtA[i] = 0x3a
- i++
- i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID)))
- i += copy(dAtA[i:], m.ExecID)
+ if len(m.Stdout) > 0 {
+ i -= len(m.Stdout)
+ copy(dAtA[i:], m.Stdout)
+ i = encodeVarintTasks(dAtA, i, uint64(len(m.Stdout)))
+ i--
+ dAtA[i] = 0x1a
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Stdin) > 0 {
+ i -= len(m.Stdin)
+ copy(dAtA[i:], m.Stdin)
+ i = encodeVarintTasks(dAtA, i, uint64(len(m.Stdin)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ContainerID) > 0 {
+ i -= len(m.ContainerID)
+ copy(dAtA[i:], m.ContainerID)
+ i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ExecProcessResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2394,20 +2564,26 @@ func (m *ExecProcessResponse) Marshal() (dAtA []byte, err error) {
}
func (m *ExecProcessResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ExecProcessResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ResizePtyRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2415,42 +2591,50 @@ func (m *ResizePtyRequest) Marshal() (dAtA []byte, err error) {
}
func (m *ResizePtyRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResizePtyRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ContainerID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
- i += copy(dAtA[i:], m.ContainerID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- if len(m.ExecID) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID)))
- i += copy(dAtA[i:], m.ExecID)
+ if m.Height != 0 {
+ i = encodeVarintTasks(dAtA, i, uint64(m.Height))
+ i--
+ dAtA[i] = 0x20
}
if m.Width != 0 {
- dAtA[i] = 0x18
- i++
i = encodeVarintTasks(dAtA, i, uint64(m.Width))
+ i--
+ dAtA[i] = 0x18
}
- if m.Height != 0 {
- dAtA[i] = 0x20
- i++
- i = encodeVarintTasks(dAtA, i, uint64(m.Height))
+ if len(m.ExecID) > 0 {
+ i -= len(m.ExecID)
+ copy(dAtA[i:], m.ExecID)
+ i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID)))
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ContainerID) > 0 {
+ i -= len(m.ContainerID)
+ copy(dAtA[i:], m.ContainerID)
+ i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *CloseIORequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2458,42 +2642,50 @@ func (m *CloseIORequest) Marshal() (dAtA []byte, err error) {
}
func (m *CloseIORequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CloseIORequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ContainerID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
- i += copy(dAtA[i:], m.ContainerID)
- }
- if len(m.ExecID) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID)))
- i += copy(dAtA[i:], m.ExecID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if m.Stdin {
- dAtA[i] = 0x18
- i++
+ i--
if m.Stdin {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
+ i--
+ dAtA[i] = 0x18
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ExecID) > 0 {
+ i -= len(m.ExecID)
+ copy(dAtA[i:], m.ExecID)
+ i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ContainerID) > 0 {
+ i -= len(m.ContainerID)
+ copy(dAtA[i:], m.ContainerID)
+ i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *PauseTaskRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2501,26 +2693,33 @@ func (m *PauseTaskRequest) Marshal() (dAtA []byte, err error) {
}
func (m *PauseTaskRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PauseTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.ContainerID) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.ContainerID)
+ copy(dAtA[i:], m.ContainerID)
i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
- i += copy(dAtA[i:], m.ContainerID)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ResumeTaskRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2528,26 +2727,33 @@ func (m *ResumeTaskRequest) Marshal() (dAtA []byte, err error) {
}
func (m *ResumeTaskRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResumeTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.ContainerID) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.ContainerID)
+ copy(dAtA[i:], m.ContainerID)
i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
- i += copy(dAtA[i:], m.ContainerID)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ListPidsRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2555,26 +2761,33 @@ func (m *ListPidsRequest) Marshal() (dAtA []byte, err error) {
}
func (m *ListPidsRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ListPidsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.ContainerID) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.ContainerID)
+ copy(dAtA[i:], m.ContainerID)
i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
- i += copy(dAtA[i:], m.ContainerID)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ListPidsResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2582,32 +2795,40 @@ func (m *ListPidsResponse) Marshal() (dAtA []byte, err error) {
}
func (m *ListPidsResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ListPidsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Processes) > 0 {
- for _, msg := range m.Processes {
- dAtA[i] = 0xa
- i++
- i = encodeVarintTasks(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ for iNdEx := len(m.Processes) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Processes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTasks(dAtA, i, uint64(size))
}
- i += n
+ i--
+ dAtA[i] = 0xa
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *CheckpointTaskRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2615,42 +2836,52 @@ func (m *CheckpointTaskRequest) Marshal() (dAtA []byte, err error) {
}
func (m *CheckpointTaskRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CheckpointTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ContainerID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
- i += copy(dAtA[i:], m.ContainerID)
- }
- if len(m.ParentCheckpoint) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintTasks(dAtA, i, uint64(len(m.ParentCheckpoint)))
- i += copy(dAtA[i:], m.ParentCheckpoint)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if m.Options != nil {
- dAtA[i] = 0x1a
- i++
- i = encodeVarintTasks(dAtA, i, uint64(m.Options.Size()))
- n6, err := m.Options.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ {
+ size, err := m.Options.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTasks(dAtA, i, uint64(size))
}
- i += n6
+ i--
+ dAtA[i] = 0x1a
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ParentCheckpoint) > 0 {
+ i -= len(m.ParentCheckpoint)
+ copy(dAtA[i:], m.ParentCheckpoint)
+ i = encodeVarintTasks(dAtA, i, uint64(len(m.ParentCheckpoint)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ContainerID) > 0 {
+ i -= len(m.ContainerID)
+ copy(dAtA[i:], m.ContainerID)
+ i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *CheckpointTaskResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2658,32 +2889,40 @@ func (m *CheckpointTaskResponse) Marshal() (dAtA []byte, err error) {
}
func (m *CheckpointTaskResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CheckpointTaskResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Descriptors) > 0 {
- for _, msg := range m.Descriptors {
- dAtA[i] = 0xa
- i++
- i = encodeVarintTasks(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ for iNdEx := len(m.Descriptors) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Descriptors[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTasks(dAtA, i, uint64(size))
}
- i += n
+ i--
+ dAtA[i] = 0xa
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *UpdateTaskRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2691,36 +2930,45 @@ func (m *UpdateTaskRequest) Marshal() (dAtA []byte, err error) {
}
func (m *UpdateTaskRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UpdateTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ContainerID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
- i += copy(dAtA[i:], m.ContainerID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if m.Resources != nil {
- dAtA[i] = 0x12
- i++
- i = encodeVarintTasks(dAtA, i, uint64(m.Resources.Size()))
- n7, err := m.Resources.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ {
+ size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTasks(dAtA, i, uint64(size))
}
- i += n7
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ContainerID) > 0 {
+ i -= len(m.ContainerID)
+ copy(dAtA[i:], m.ContainerID)
+ i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *MetricsRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2728,35 +2976,35 @@ func (m *MetricsRequest) Marshal() (dAtA []byte, err error) {
}
func (m *MetricsRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MetricsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Filters) > 0 {
- for _, s := range m.Filters {
+ for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Filters[iNdEx])
+ copy(dAtA[i:], m.Filters[iNdEx])
+ i = encodeVarintTasks(dAtA, i, uint64(len(m.Filters[iNdEx])))
+ i--
dAtA[i] = 0xa
- i++
- l = len(s)
- for l >= 1<<7 {
- dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
- l >>= 7
- i++
- }
- dAtA[i] = uint8(l)
- i++
- i += copy(dAtA[i:], s)
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *MetricsResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2764,32 +3012,40 @@ func (m *MetricsResponse) Marshal() (dAtA []byte, err error) {
}
func (m *MetricsResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MetricsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Metrics) > 0 {
- for _, msg := range m.Metrics {
- dAtA[i] = 0xa
- i++
- i = encodeVarintTasks(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Metrics[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTasks(dAtA, i, uint64(size))
}
- i += n
+ i--
+ dAtA[i] = 0xa
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *WaitRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2797,32 +3053,40 @@ func (m *WaitRequest) Marshal() (dAtA []byte, err error) {
}
func (m *WaitRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WaitRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ContainerID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
- i += copy(dAtA[i:], m.ContainerID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.ExecID) > 0 {
- dAtA[i] = 0x12
- i++
+ i -= len(m.ExecID)
+ copy(dAtA[i:], m.ExecID)
i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID)))
- i += copy(dAtA[i:], m.ExecID)
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ContainerID) > 0 {
+ i -= len(m.ContainerID)
+ copy(dAtA[i:], m.ContainerID)
+ i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *WaitResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2830,37 +3094,45 @@ func (m *WaitResponse) Marshal() (dAtA []byte, err error) {
}
func (m *WaitResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WaitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if m.ExitStatus != 0 {
- dAtA[i] = 0x8
- i++
- i = encodeVarintTasks(dAtA, i, uint64(m.ExitStatus))
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- dAtA[i] = 0x12
- i++
- i = encodeVarintTasks(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)))
- n8, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
- if err != nil {
- return 0, err
+ n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt):])
+ if err8 != nil {
+ return 0, err8
}
- i += n8
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i -= n8
+ i = encodeVarintTasks(dAtA, i, uint64(n8))
+ i--
+ dAtA[i] = 0x12
+ if m.ExitStatus != 0 {
+ i = encodeVarintTasks(dAtA, i, uint64(m.ExitStatus))
+ i--
+ dAtA[i] = 0x8
}
- return i, nil
+ return len(dAtA) - i, nil
}
func encodeVarintTasks(dAtA []byte, offset int, v uint64) int {
+ offset -= sovTasks(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func (m *CreateTaskRequest) Size() (n int) {
if m == nil {
@@ -3419,14 +3691,7 @@ func (m *WaitResponse) Size() (n int) {
}
func sovTasks(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozTasks(x uint64) (n int) {
return sovTasks(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -3435,9 +3700,14 @@ func (this *CreateTaskRequest) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForRootfs := "[]*Mount{"
+ for _, f := range this.Rootfs {
+ repeatedStringForRootfs += strings.Replace(fmt.Sprintf("%v", f), "Mount", "types.Mount", 1) + ","
+ }
+ repeatedStringForRootfs += "}"
s := strings.Join([]string{`&CreateTaskRequest{`,
`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
- `Rootfs:` + strings.Replace(fmt.Sprintf("%v", this.Rootfs), "Mount", "types.Mount", 1) + `,`,
+ `Rootfs:` + repeatedStringForRootfs + `,`,
`Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`,
`Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`,
`Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`,
@@ -3503,7 +3773,7 @@ func (this *DeleteResponse) String() string {
`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
- `ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
+ `ExitedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExitedAt), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -3559,8 +3829,13 @@ func (this *ListTasksResponse) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForTasks := "[]*Process{"
+ for _, f := range this.Tasks {
+ repeatedStringForTasks += strings.Replace(fmt.Sprintf("%v", f), "Process", "task.Process", 1) + ","
+ }
+ repeatedStringForTasks += "}"
s := strings.Join([]string{`&ListTasksResponse{`,
- `Tasks:` + strings.Replace(fmt.Sprintf("%v", this.Tasks), "Process", "task.Process", 1) + `,`,
+ `Tasks:` + repeatedStringForTasks + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -3671,8 +3946,13 @@ func (this *ListPidsResponse) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForProcesses := "[]*ProcessInfo{"
+ for _, f := range this.Processes {
+ repeatedStringForProcesses += strings.Replace(fmt.Sprintf("%v", f), "ProcessInfo", "task.ProcessInfo", 1) + ","
+ }
+ repeatedStringForProcesses += "}"
s := strings.Join([]string{`&ListPidsResponse{`,
- `Processes:` + strings.Replace(fmt.Sprintf("%v", this.Processes), "ProcessInfo", "task.ProcessInfo", 1) + `,`,
+ `Processes:` + repeatedStringForProcesses + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -3695,8 +3975,13 @@ func (this *CheckpointTaskResponse) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForDescriptors := "[]*Descriptor{"
+ for _, f := range this.Descriptors {
+ repeatedStringForDescriptors += strings.Replace(fmt.Sprintf("%v", f), "Descriptor", "types.Descriptor", 1) + ","
+ }
+ repeatedStringForDescriptors += "}"
s := strings.Join([]string{`&CheckpointTaskResponse{`,
- `Descriptors:` + strings.Replace(fmt.Sprintf("%v", this.Descriptors), "Descriptor", "types.Descriptor", 1) + `,`,
+ `Descriptors:` + repeatedStringForDescriptors + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -3729,8 +4014,13 @@ func (this *MetricsResponse) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForMetrics := "[]*Metric{"
+ for _, f := range this.Metrics {
+ repeatedStringForMetrics += strings.Replace(fmt.Sprintf("%v", f), "Metric", "types.Metric", 1) + ","
+ }
+ repeatedStringForMetrics += "}"
s := strings.Join([]string{`&MetricsResponse{`,
- `Metrics:` + strings.Replace(fmt.Sprintf("%v", this.Metrics), "Metric", "types.Metric", 1) + `,`,
+ `Metrics:` + repeatedStringForMetrics + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -3754,7 +4044,7 @@ func (this *WaitResponse) String() string {
}
s := strings.Join([]string{`&WaitResponse{`,
`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
- `ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
+ `ExitedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExitedAt), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -7001,6 +7291,7 @@ func (m *WaitResponse) Unmarshal(dAtA []byte) error {
func skipTasks(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -7032,10 +7323,8 @@ func skipTasks(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -7056,55 +7345,30 @@ func skipTasks(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthTasks
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthTasks
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowTasks
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipTasks(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthTasks
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupTasks
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthTasks
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthTasks = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowTasks = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthTasks = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowTasks = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupTasks = fmt.Errorf("proto: unexpected end of group")
)
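
The regenerated marshalers above all follow the same reverse-fill pattern: Marshal allocates a buffer of Size() bytes, MarshalToSizedBuffer writes fields from the end of that buffer toward the front (highest field number first), and encodeVarintTasks now takes the end offset of the varint and returns its new start. A minimal standalone sketch of that contract (hypothetical field value and helper names that mirror sovTasks/encodeVarintTasks; not taken from the vendored files):

    package main

    import (
        "fmt"
        "math/bits"
    )

    // sov mirrors the regenerated sovTasks: number of bytes needed to varint-encode v.
    func sov(v uint64) int { return (bits.Len64(v|1) + 6) / 7 }

    // encodeVarint mirrors the regenerated encodeVarintTasks: it steps the offset back
    // by the varint's width, writes the varint there, and returns that new base.
    func encodeVarint(dAtA []byte, offset int, v uint64) int {
        offset -= sov(v)
        base := offset
        for v >= 1<<7 {
            dAtA[offset] = uint8(v&0x7f | 0x80)
            v >>= 7
            offset++
        }
        dAtA[offset] = uint8(v)
        return base
    }

    func main() {
        // Marshal one string field (field number 1, wire type 2) the way the generated
        // code now does it: payload first, then its length, then the tag, moving
        // right-to-left through a buffer that was pre-sized the way Size() would size it.
        s := "redis" // hypothetical field value
        size := 1 + sov(uint64(len(s))) + len(s)
        dAtA := make([]byte, size)
        i := len(dAtA)

        i -= len(s)
        copy(dAtA[i:], s)
        i = encodeVarint(dAtA, i, uint64(len(s)))
        i--
        dAtA[i] = 0xa // tag: field 1, wire type 2

        fmt.Printf("wrote %d bytes: % x\n", len(dAtA)-i, dAtA)
    }
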
diff --git a/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.pb.go b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.pb.go
index 4bd5828a4..0e61351d5 100644
--- a/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.pb.go
@@ -13,6 +13,7 @@ import (
types "github.com/gogo/protobuf/types"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
time "time"
@@ -28,7 +29,7 @@ var _ = time.Kitchen
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type ForwardRequest struct {
Envelope *Envelope `protobuf:"bytes,1,opt,name=envelope,proto3" json:"envelope,omitempty"`
@@ -50,7 +51,7 @@ func (m *ForwardRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro
return xxx_messageInfo_ForwardRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -92,7 +93,7 @@ func (m *Envelope) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Envelope.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -179,7 +180,7 @@ func (m *Envelope) Field(fieldpath []string) (string, bool) {
func (m *ForwardRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -187,30 +188,38 @@ func (m *ForwardRequest) Marshal() (dAtA []byte, err error) {
}
func (m *ForwardRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ForwardRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if m.Envelope != nil {
- dAtA[i] = 0xa
- i++
- i = encodeVarintEvents(dAtA, i, uint64(m.Envelope.Size()))
- n1, err := m.Envelope.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ {
+ size, err := m.Envelope.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintEvents(dAtA, i, uint64(size))
}
- i += n1
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *Envelope) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -218,54 +227,66 @@ func (m *Envelope) Marshal() (dAtA []byte, err error) {
}
func (m *Envelope) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Envelope) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- dAtA[i] = 0xa
- i++
- i = encodeVarintEvents(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)))
- n2, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i:])
- if err != nil {
- return 0, err
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- i += n2
- if len(m.Namespace) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintEvents(dAtA, i, uint64(len(m.Namespace)))
- i += copy(dAtA[i:], m.Namespace)
+ if m.Event != nil {
+ {
+ size, err := m.Event.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintEvents(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
}
if len(m.Topic) > 0 {
- dAtA[i] = 0x1a
- i++
+ i -= len(m.Topic)
+ copy(dAtA[i:], m.Topic)
i = encodeVarintEvents(dAtA, i, uint64(len(m.Topic)))
- i += copy(dAtA[i:], m.Topic)
+ i--
+ dAtA[i] = 0x1a
}
- if m.Event != nil {
- dAtA[i] = 0x22
- i++
- i = encodeVarintEvents(dAtA, i, uint64(m.Event.Size()))
- n3, err := m.Event.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n3
+ if len(m.Namespace) > 0 {
+ i -= len(m.Namespace)
+ copy(dAtA[i:], m.Namespace)
+ i = encodeVarintEvents(dAtA, i, uint64(len(m.Namespace)))
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):])
+ if err3 != nil {
+ return 0, err3
}
- return i, nil
+ i -= n3
+ i = encodeVarintEvents(dAtA, i, uint64(n3))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
func encodeVarintEvents(dAtA []byte, offset int, v uint64) int {
+ offset -= sovEvents(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func (m *ForwardRequest) Size() (n int) {
if m == nil {
@@ -310,14 +331,7 @@ func (m *Envelope) Size() (n int) {
}
func sovEvents(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozEvents(x uint64) (n int) {
return sovEvents(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -327,7 +341,7 @@ func (this *ForwardRequest) String() string {
return "nil"
}
s := strings.Join([]string{`&ForwardRequest{`,
- `Envelope:` + strings.Replace(fmt.Sprintf("%v", this.Envelope), "Envelope", "Envelope", 1) + `,`,
+ `Envelope:` + strings.Replace(this.Envelope.String(), "Envelope", "Envelope", 1) + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -338,7 +352,7 @@ func (this *Envelope) String() string {
return "nil"
}
s := strings.Join([]string{`&Envelope{`,
- `Timestamp:` + strings.Replace(strings.Replace(this.Timestamp.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+ `Timestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
`Topic:` + fmt.Sprintf("%v", this.Topic) + `,`,
`Event:` + strings.Replace(fmt.Sprintf("%v", this.Event), "Any", "types.Any", 1) + `,`,
@@ -669,6 +683,7 @@ func (m *Envelope) Unmarshal(dAtA []byte) error {
func skipEvents(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -700,10 +715,8 @@ func skipEvents(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -724,55 +737,30 @@ func skipEvents(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthEvents
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthEvents
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowEvents
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipEvents(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthEvents
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupEvents
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthEvents
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthEvents = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowEvents = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthEvents = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowEvents = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupEvents = fmt.Errorf("proto: unexpected end of group")
)
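
Each *.pb.go file in this patch also swaps the iterative sov* helper for a closed form over math/bits. A small standalone check (standalone names, not the vendored code) that the two agree for representative values:

    package main

    import (
        "fmt"
        "math/bits"
    )

    // sovOld is the loop every sov* helper used before this patch.
    func sovOld(x uint64) (n int) {
        for {
            n++
            x >>= 7
            if x == 0 {
                break
            }
        }
        return n
    }

    // sovNew is the closed form the regenerated files use.
    func sovNew(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

    func main() {
        for _, x := range []uint64{0, 1, 127, 128, 300, 1 << 35, ^uint64(0)} {
            fmt.Printf("x=%d old=%d new=%d\n", x, sovOld(x), sovNew(x))
        }
    }
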
diff --git a/vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go b/vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go
index cc392889b..81b8c3395 100644
--- a/vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go
@@ -9,8 +9,11 @@ import (
proto "github.com/gogo/protobuf/proto"
types "github.com/gogo/protobuf/types"
grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
)
@@ -24,7 +27,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type VersionResponse struct {
Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
@@ -47,7 +50,7 @@ func (m *VersionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, err
return xxx_messageInfo_VersionResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -131,6 +134,14 @@ type VersionServer interface {
Version(context.Context, *types.Empty) (*VersionResponse, error)
}
+// UnimplementedVersionServer can be embedded to have forward compatible implementations.
+type UnimplementedVersionServer struct {
+}
+
+func (*UnimplementedVersionServer) Version(ctx context.Context, req *types.Empty) (*VersionResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Version not implemented")
+}
+
func RegisterVersionServer(s *grpc.Server, srv VersionServer) {
s.RegisterService(&_Version_serviceDesc, srv)
}
@@ -169,7 +180,7 @@ var _Version_serviceDesc = grpc.ServiceDesc{
func (m *VersionResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -177,36 +188,46 @@ func (m *VersionResponse) Marshal() (dAtA []byte, err error) {
}
func (m *VersionResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *VersionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Version) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintVersion(dAtA, i, uint64(len(m.Version)))
- i += copy(dAtA[i:], m.Version)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Revision) > 0 {
- dAtA[i] = 0x12
- i++
+ i -= len(m.Revision)
+ copy(dAtA[i:], m.Revision)
i = encodeVarintVersion(dAtA, i, uint64(len(m.Revision)))
- i += copy(dAtA[i:], m.Revision)
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Version) > 0 {
+ i -= len(m.Version)
+ copy(dAtA[i:], m.Version)
+ i = encodeVarintVersion(dAtA, i, uint64(len(m.Version)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func encodeVarintVersion(dAtA []byte, offset int, v uint64) int {
+ offset -= sovVersion(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func (m *VersionResponse) Size() (n int) {
if m == nil {
@@ -229,14 +250,7 @@ func (m *VersionResponse) Size() (n int) {
}
func sovVersion(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozVersion(x uint64) (n int) {
return sovVersion(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -382,6 +396,7 @@ func (m *VersionResponse) Unmarshal(dAtA []byte) error {
func skipVersion(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -413,10 +428,8 @@ func skipVersion(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -437,55 +450,30 @@ func skipVersion(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthVersion
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthVersion
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowVersion
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipVersion(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthVersion
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupVersion
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthVersion
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthVersion = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowVersion = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthVersion = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowVersion = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupVersion = fmt.Errorf("proto: unexpected end of group")
)
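
The new UnimplementedVersionServer above is the gRPC forward-compatibility pattern: servers embed it so they keep compiling if the service definition later gains methods, and any RPC they do not override answers codes.Unimplemented. A rough sketch of how an embedder behaves, using hypothetical local stand-ins for the generated types (the real request type is *types.Empty):

    package main

    import (
        "context"
        "fmt"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // Local stand-ins for the generated types.
    type VersionResponse struct{ Version, Revision string }

    type VersionServer interface {
        Version(context.Context, *struct{}) (*VersionResponse, error)
    }

    // UnimplementedVersionServer mirrors the generated forward-compat base type.
    type UnimplementedVersionServer struct{}

    func (*UnimplementedVersionServer) Version(ctx context.Context, req *struct{}) (*VersionResponse, error) {
        return nil, status.Errorf(codes.Unimplemented, "method Version not implemented")
    }

    // myServer embeds the base type, so it keeps satisfying VersionServer even if the
    // service later grows methods it does not implement.
    type myServer struct {
        UnimplementedVersionServer
    }

    func main() {
        var s VersionServer = &myServer{}
        _, err := s.Version(context.Background(), nil)
        fmt.Println(err) // rpc error: code = Unimplemented desc = method Version not implemented
    }
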
diff --git a/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go b/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go
index c0179d297..437d41f23 100644
--- a/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go
+++ b/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go
@@ -10,6 +10,7 @@ import (
github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
)
@@ -23,7 +24,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// Descriptor describes a blob in a content store.
//
@@ -53,7 +54,7 @@ func (m *Descriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Descriptor.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -108,7 +109,7 @@ var fileDescriptor_37f958df3707db9e = []byte{
func (m *Descriptor) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -116,58 +117,70 @@ func (m *Descriptor) Marshal() (dAtA []byte, err error) {
}
func (m *Descriptor) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Descriptor) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.MediaType) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintDescriptor(dAtA, i, uint64(len(m.MediaType)))
- i += copy(dAtA[i:], m.MediaType)
- }
- if len(m.Digest) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintDescriptor(dAtA, i, uint64(len(m.Digest)))
- i += copy(dAtA[i:], m.Digest)
- }
- if m.Size_ != 0 {
- dAtA[i] = 0x18
- i++
- i = encodeVarintDescriptor(dAtA, i, uint64(m.Size_))
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Annotations) > 0 {
- for k, _ := range m.Annotations {
- dAtA[i] = 0x2a
- i++
+ for k := range m.Annotations {
v := m.Annotations[k]
- mapSize := 1 + len(k) + sovDescriptor(uint64(len(k))) + 1 + len(v) + sovDescriptor(uint64(len(v)))
- i = encodeVarintDescriptor(dAtA, i, uint64(mapSize))
- dAtA[i] = 0xa
- i++
- i = encodeVarintDescriptor(dAtA, i, uint64(len(k)))
- i += copy(dAtA[i:], k)
- dAtA[i] = 0x12
- i++
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
i = encodeVarintDescriptor(dAtA, i, uint64(len(v)))
- i += copy(dAtA[i:], v)
+ i--
+ dAtA[i] = 0x12
+ i -= len(k)
+ copy(dAtA[i:], k)
+ i = encodeVarintDescriptor(dAtA, i, uint64(len(k)))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintDescriptor(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x2a
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if m.Size_ != 0 {
+ i = encodeVarintDescriptor(dAtA, i, uint64(m.Size_))
+ i--
+ dAtA[i] = 0x18
+ }
+ if len(m.Digest) > 0 {
+ i -= len(m.Digest)
+ copy(dAtA[i:], m.Digest)
+ i = encodeVarintDescriptor(dAtA, i, uint64(len(m.Digest)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.MediaType) > 0 {
+ i -= len(m.MediaType)
+ copy(dAtA[i:], m.MediaType)
+ i = encodeVarintDescriptor(dAtA, i, uint64(len(m.MediaType)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func encodeVarintDescriptor(dAtA []byte, offset int, v uint64) int {
+ offset -= sovDescriptor(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func (m *Descriptor) Size() (n int) {
if m == nil {
@@ -201,14 +214,7 @@ func (m *Descriptor) Size() (n int) {
}
func sovDescriptor(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozDescriptor(x uint64) (n int) {
return sovDescriptor(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -512,6 +518,7 @@ func (m *Descriptor) Unmarshal(dAtA []byte) error {
func skipDescriptor(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -543,10 +550,8 @@ func skipDescriptor(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -567,55 +572,30 @@ func skipDescriptor(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthDescriptor
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthDescriptor
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowDescriptor
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipDescriptor(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthDescriptor
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupDescriptor
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthDescriptor
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthDescriptor = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowDescriptor = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthDescriptor = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowDescriptor = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupDescriptor = fmt.Errorf("proto: unexpected end of group")
)
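
The Annotations handling above shows the one extra wrinkle maps add to the reverse-fill scheme: each entry's value and key are written back-to-front, and the entry's length prefix is then derived from how far the write cursor moved (baseI - i) rather than precomputed. A standalone sketch of one entry (hypothetical key/value; helpers mirror sovDescriptor/encodeVarintDescriptor):

    package main

    import (
        "fmt"
        "math/bits"
    )

    func sov(v uint64) int { return (bits.Len64(v|1) + 6) / 7 }

    func encodeVarint(dAtA []byte, offset int, v uint64) int {
        offset -= sov(v)
        base := offset
        for v >= 1<<7 {
            dAtA[offset] = uint8(v&0x7f | 0x80)
            v >>= 7
            offset++
        }
        dAtA[offset] = uint8(v)
        return base
    }

    func main() {
        // One hypothetical Annotations entry for field 5 (tag 0x2a), written back-to-front:
        // value, then key, then an entry length computed from how far i moved (baseI - i).
        k, v := "name", "redis"
        entry := 1 + sov(uint64(len(k))) + len(k) + 1 + sov(uint64(len(v))) + len(v)
        dAtA := make([]byte, 1+sov(uint64(entry))+entry)
        i := len(dAtA)

        baseI := i
        i -= len(v)
        copy(dAtA[i:], v)
        i = encodeVarint(dAtA, i, uint64(len(v)))
        i--
        dAtA[i] = 0x12 // value: field 2, wire type 2
        i -= len(k)
        copy(dAtA[i:], k)
        i = encodeVarint(dAtA, i, uint64(len(k)))
        i--
        dAtA[i] = 0xa // key: field 1, wire type 2
        i = encodeVarint(dAtA, i, uint64(baseI-i))
        i--
        dAtA[i] = 0x2a // map entry: field 5, wire type 2

        fmt.Printf("% x\n", dAtA[i:])
    }
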
diff --git a/vendor/github.com/containerd/containerd/api/types/metrics.pb.go b/vendor/github.com/containerd/containerd/api/types/metrics.pb.go
index c231d343a..89a8d9cd6 100644
--- a/vendor/github.com/containerd/containerd/api/types/metrics.pb.go
+++ b/vendor/github.com/containerd/containerd/api/types/metrics.pb.go
@@ -10,6 +10,7 @@ import (
types "github.com/gogo/protobuf/types"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
time "time"
@@ -25,7 +26,7 @@ var _ = time.Kitchen
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type Metric struct {
Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"timestamp"`
@@ -49,7 +50,7 @@ func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -100,7 +101,7 @@ var fileDescriptor_8d594d87edf6e6bc = []byte{
func (m *Metric) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -108,48 +109,59 @@ func (m *Metric) Marshal() (dAtA []byte, err error) {
}
func (m *Metric) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Metric) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- dAtA[i] = 0xa
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)))
- n1, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n1
- if len(m.ID) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if m.Data != nil {
- dAtA[i] = 0x1a
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.Data.Size()))
- n2, err := m.Data.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ {
+ size, err := m.Data.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintMetrics(dAtA, i, uint64(size))
}
- i += n2
+ i--
+ dAtA[i] = 0x1a
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ID) > 0 {
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintMetrics(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0x12
}
- return i, nil
+ n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):])
+ if err2 != nil {
+ return 0, err2
+ }
+ i -= n2
+ i = encodeVarintMetrics(dAtA, i, uint64(n2))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int {
+ offset -= sovMetrics(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func (m *Metric) Size() (n int) {
if m == nil {
@@ -174,14 +186,7 @@ func (m *Metric) Size() (n int) {
}
func sovMetrics(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozMetrics(x uint64) (n int) {
return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -191,7 +196,7 @@ func (this *Metric) String() string {
return "nil"
}
s := strings.Join([]string{`&Metric{`,
- `Timestamp:` + strings.Replace(strings.Replace(this.Timestamp.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+ `Timestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
`Data:` + strings.Replace(fmt.Sprintf("%v", this.Data), "Any", "types.Any", 1) + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
@@ -365,6 +370,7 @@ func (m *Metric) Unmarshal(dAtA []byte) error {
func skipMetrics(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -396,10 +402,8 @@ func skipMetrics(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -420,55 +424,30 @@ func skipMetrics(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthMetrics
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthMetrics
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipMetrics(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthMetrics
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupMetrics
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthMetrics
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowMetrics = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowMetrics = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupMetrics = fmt.Errorf("proto: unexpected end of group")
)
diff --git a/vendor/github.com/containerd/containerd/api/types/mount.pb.go b/vendor/github.com/containerd/containerd/api/types/mount.pb.go
index 54af8ea2f..6872e4120 100644
--- a/vendor/github.com/containerd/containerd/api/types/mount.pb.go
+++ b/vendor/github.com/containerd/containerd/api/types/mount.pb.go
@@ -8,6 +8,7 @@ import (
proto "github.com/gogo/protobuf/proto"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
)
@@ -21,7 +22,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// Mount describes mounts for a container.
//
@@ -58,7 +59,7 @@ func (m *Mount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Mount.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -105,7 +106,7 @@ var fileDescriptor_920196890d4a7b9f = []byte{
func (m *Mount) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -113,57 +114,62 @@ func (m *Mount) Marshal() (dAtA []byte, err error) {
}
func (m *Mount) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Mount) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Type) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintMount(dAtA, i, uint64(len(m.Type)))
- i += copy(dAtA[i:], m.Type)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- if len(m.Source) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintMount(dAtA, i, uint64(len(m.Source)))
- i += copy(dAtA[i:], m.Source)
+ if len(m.Options) > 0 {
+ for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Options[iNdEx])
+ copy(dAtA[i:], m.Options[iNdEx])
+ i = encodeVarintMount(dAtA, i, uint64(len(m.Options[iNdEx])))
+ i--
+ dAtA[i] = 0x22
+ }
}
if len(m.Target) > 0 {
- dAtA[i] = 0x1a
- i++
+ i -= len(m.Target)
+ copy(dAtA[i:], m.Target)
i = encodeVarintMount(dAtA, i, uint64(len(m.Target)))
- i += copy(dAtA[i:], m.Target)
+ i--
+ dAtA[i] = 0x1a
}
- if len(m.Options) > 0 {
- for _, s := range m.Options {
- dAtA[i] = 0x22
- i++
- l = len(s)
- for l >= 1<<7 {
- dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
- l >>= 7
- i++
- }
- dAtA[i] = uint8(l)
- i++
- i += copy(dAtA[i:], s)
- }
+ if len(m.Source) > 0 {
+ i -= len(m.Source)
+ copy(dAtA[i:], m.Source)
+ i = encodeVarintMount(dAtA, i, uint64(len(m.Source)))
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Type) > 0 {
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintMount(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func encodeVarintMount(dAtA []byte, offset int, v uint64) int {
+ offset -= sovMount(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func (m *Mount) Size() (n int) {
if m == nil {
@@ -196,14 +202,7 @@ func (m *Mount) Size() (n int) {
}
func sovMount(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozMount(x uint64) (n int) {
return sovMount(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -415,6 +414,7 @@ func (m *Mount) Unmarshal(dAtA []byte) error {
func skipMount(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -446,10 +446,8 @@ func skipMount(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -470,55 +468,30 @@ func skipMount(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthMount
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthMount
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowMount
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipMount(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthMount
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupMount
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthMount
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthMount = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowMount = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthMount = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowMount = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupMount = fmt.Errorf("proto: unexpected end of group")
)
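
Every skip* helper in this patch gets the same rewrite: instead of returning from inside the wire-type switch and recursing into start-groups, it tracks group nesting with a depth counter and returns only once depth drops back to zero, reporting ErrUnexpectedEndOfGroup* for a stray end-group tag. A compressed standalone sketch of that control flow (single-byte tags and varint values only, to stay short; not the vendored implementation):

    package main

    import (
        "fmt"
        "io"
    )

    var errEndGroup = fmt.Errorf("proto: unexpected end of group")

    // skip reports how many bytes the field starting at dAtA[0] occupies. To keep the
    // sketch short it assumes single-byte tags and single-byte varint values.
    func skip(dAtA []byte) (int, error) {
        iNdEx := 0
        depth := 0
        for iNdEx < len(dAtA) {
            wire := dAtA[iNdEx]
            iNdEx++
            switch wire & 0x7 {
            case 0: // varint value
                iNdEx++
            case 1: // fixed64
                iNdEx += 8
            case 2: // length-delimited
                iNdEx += 1 + int(dAtA[iNdEx])
            case 3: // start group: bump the nesting level instead of recursing
                depth++
            case 4: // end group: only valid while inside a group
                if depth == 0 {
                    return 0, errEndGroup
                }
                depth--
            case 5: // fixed32
                iNdEx += 4
            default:
                return 0, fmt.Errorf("proto: illegal wireType %d", wire&0x7)
            }
            if depth == 0 {
                return iNdEx, nil
            }
        }
        return 0, io.ErrUnexpectedEOF
    }

    func main() {
        // Field 1 encoded as a group (start tag 0x0b, inner varint field, end tag 0x0c),
        // followed by a trailing byte that must be left unconsumed.
        buf := []byte{0x0b, 0x10, 0x07, 0x0c, 0xff}
        fmt.Println(skip(buf)) // 4 <nil>
    }
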
diff --git a/vendor/github.com/containerd/containerd/api/types/platform.pb.go b/vendor/github.com/containerd/containerd/api/types/platform.pb.go
index 558f94732..c03d8b077 100644
--- a/vendor/github.com/containerd/containerd/api/types/platform.pb.go
+++ b/vendor/github.com/containerd/containerd/api/types/platform.pb.go
@@ -8,6 +8,7 @@ import (
proto "github.com/gogo/protobuf/proto"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
)
@@ -21,7 +22,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// Platform follows the structure of the OCI platform specification, from
// descriptors.
@@ -47,7 +48,7 @@ func (m *Platform) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Platform.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -94,7 +95,7 @@ var fileDescriptor_24ba7a4b83e2367e = []byte{
func (m *Platform) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -102,42 +103,53 @@ func (m *Platform) Marshal() (dAtA []byte, err error) {
}
func (m *Platform) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Platform) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.OS) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintPlatform(dAtA, i, uint64(len(m.OS)))
- i += copy(dAtA[i:], m.OS)
- }
- if len(m.Architecture) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintPlatform(dAtA, i, uint64(len(m.Architecture)))
- i += copy(dAtA[i:], m.Architecture)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Variant) > 0 {
- dAtA[i] = 0x1a
- i++
+ i -= len(m.Variant)
+ copy(dAtA[i:], m.Variant)
i = encodeVarintPlatform(dAtA, i, uint64(len(m.Variant)))
- i += copy(dAtA[i:], m.Variant)
+ i--
+ dAtA[i] = 0x1a
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Architecture) > 0 {
+ i -= len(m.Architecture)
+ copy(dAtA[i:], m.Architecture)
+ i = encodeVarintPlatform(dAtA, i, uint64(len(m.Architecture)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.OS) > 0 {
+ i -= len(m.OS)
+ copy(dAtA[i:], m.OS)
+ i = encodeVarintPlatform(dAtA, i, uint64(len(m.OS)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func encodeVarintPlatform(dAtA []byte, offset int, v uint64) int {
+ offset -= sovPlatform(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func (m *Platform) Size() (n int) {
if m == nil {
@@ -164,14 +176,7 @@ func (m *Platform) Size() (n int) {
}
func sovPlatform(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozPlatform(x uint64) (n int) {
return sovPlatform(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -350,6 +355,7 @@ func (m *Platform) Unmarshal(dAtA []byte) error {
func skipPlatform(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -381,10 +387,8 @@ func skipPlatform(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -405,55 +409,30 @@ func skipPlatform(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthPlatform
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthPlatform
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowPlatform
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipPlatform(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthPlatform
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupPlatform
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthPlatform
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthPlatform = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowPlatform = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthPlatform = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowPlatform = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupPlatform = fmt.Errorf("proto: unexpected end of group")
)
diff --git a/vendor/github.com/containerd/containerd/api/types/task/task.pb.go b/vendor/github.com/containerd/containerd/api/types/task/task.pb.go
index 69d851c93..ae824ff45 100644
--- a/vendor/github.com/containerd/containerd/api/types/task/task.pb.go
+++ b/vendor/github.com/containerd/containerd/api/types/task/task.pb.go
@@ -10,6 +10,7 @@ import (
types "github.com/gogo/protobuf/types"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
time "time"
@@ -25,7 +26,7 @@ var _ = time.Kitchen
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type Status int32
@@ -93,7 +94,7 @@ func (m *Process) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Process.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -137,7 +138,7 @@ func (m *ProcessInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_ProcessInfo.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -208,7 +209,7 @@ var fileDescriptor_391ef18c8ab0dc16 = []byte{
func (m *Process) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -216,83 +217,94 @@ func (m *Process) Marshal() (dAtA []byte, err error) {
}
func (m *Process) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Process) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ContainerID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
- i += copy(dAtA[i:], m.ContainerID)
- }
- if len(m.ID) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintTask(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
- }
- if m.Pid != 0 {
- dAtA[i] = 0x18
- i++
- i = encodeVarintTask(dAtA, i, uint64(m.Pid))
- }
- if m.Status != 0 {
- dAtA[i] = 0x20
- i++
- i = encodeVarintTask(dAtA, i, uint64(m.Status))
- }
- if len(m.Stdin) > 0 {
- dAtA[i] = 0x2a
- i++
- i = encodeVarintTask(dAtA, i, uint64(len(m.Stdin)))
- i += copy(dAtA[i:], m.Stdin)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- if len(m.Stdout) > 0 {
- dAtA[i] = 0x32
- i++
- i = encodeVarintTask(dAtA, i, uint64(len(m.Stdout)))
- i += copy(dAtA[i:], m.Stdout)
+ n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt):])
+ if err1 != nil {
+ return 0, err1
}
- if len(m.Stderr) > 0 {
- dAtA[i] = 0x3a
- i++
- i = encodeVarintTask(dAtA, i, uint64(len(m.Stderr)))
- i += copy(dAtA[i:], m.Stderr)
+ i -= n1
+ i = encodeVarintTask(dAtA, i, uint64(n1))
+ i--
+ dAtA[i] = 0x52
+ if m.ExitStatus != 0 {
+ i = encodeVarintTask(dAtA, i, uint64(m.ExitStatus))
+ i--
+ dAtA[i] = 0x48
}
if m.Terminal {
- dAtA[i] = 0x40
- i++
+ i--
if m.Terminal {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
+ i--
+ dAtA[i] = 0x40
}
- if m.ExitStatus != 0 {
- dAtA[i] = 0x48
- i++
- i = encodeVarintTask(dAtA, i, uint64(m.ExitStatus))
+ if len(m.Stderr) > 0 {
+ i -= len(m.Stderr)
+ copy(dAtA[i:], m.Stderr)
+ i = encodeVarintTask(dAtA, i, uint64(len(m.Stderr)))
+ i--
+ dAtA[i] = 0x3a
}
- dAtA[i] = 0x52
- i++
- i = encodeVarintTask(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)))
- n1, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
- if err != nil {
- return 0, err
+ if len(m.Stdout) > 0 {
+ i -= len(m.Stdout)
+ copy(dAtA[i:], m.Stdout)
+ i = encodeVarintTask(dAtA, i, uint64(len(m.Stdout)))
+ i--
+ dAtA[i] = 0x32
}
- i += n1
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Stdin) > 0 {
+ i -= len(m.Stdin)
+ copy(dAtA[i:], m.Stdin)
+ i = encodeVarintTask(dAtA, i, uint64(len(m.Stdin)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.Status != 0 {
+ i = encodeVarintTask(dAtA, i, uint64(m.Status))
+ i--
+ dAtA[i] = 0x20
+ }
+ if m.Pid != 0 {
+ i = encodeVarintTask(dAtA, i, uint64(m.Pid))
+ i--
+ dAtA[i] = 0x18
+ }
+ if len(m.ID) > 0 {
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintTask(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ContainerID) > 0 {
+ i -= len(m.ContainerID)
+ copy(dAtA[i:], m.ContainerID)
+ i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ProcessInfo) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -300,39 +312,49 @@ func (m *ProcessInfo) Marshal() (dAtA []byte, err error) {
}
func (m *ProcessInfo) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ProcessInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if m.Pid != 0 {
- dAtA[i] = 0x8
- i++
- i = encodeVarintTask(dAtA, i, uint64(m.Pid))
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if m.Info != nil {
- dAtA[i] = 0x12
- i++
- i = encodeVarintTask(dAtA, i, uint64(m.Info.Size()))
- n2, err := m.Info.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ {
+ size, err := m.Info.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTask(dAtA, i, uint64(size))
}
- i += n2
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if m.Pid != 0 {
+ i = encodeVarintTask(dAtA, i, uint64(m.Pid))
+ i--
+ dAtA[i] = 0x8
}
- return i, nil
+ return len(dAtA) - i, nil
}
func encodeVarintTask(dAtA []byte, offset int, v uint64) int {
+ offset -= sovTask(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func (m *Process) Size() (n int) {
if m == nil {
@@ -400,14 +422,7 @@ func (m *ProcessInfo) Size() (n int) {
}
func sovTask(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozTask(x uint64) (n int) {
return sovTask(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -426,7 +441,7 @@ func (this *Process) String() string {
`Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`,
`Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`,
`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
- `ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+ `ExitedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExitedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -888,6 +903,7 @@ func (m *ProcessInfo) Unmarshal(dAtA []byte) error {
func skipTask(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -919,10 +935,8 @@ func skipTask(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -943,55 +957,30 @@ func skipTask(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthTask
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthTask
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowTask
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipTask(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthTask
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupTask
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthTask
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthTask = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowTask = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthTask = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowTask = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupTask = fmt.Errorf("proto: unexpected end of group")
)
diff --git a/vendor/github.com/containerd/containerd/client.go b/vendor/github.com/containerd/containerd/client.go
index f65b03827..1987e45c0 100644
--- a/vendor/github.com/containerd/containerd/client.go
+++ b/vendor/github.com/containerd/containerd/client.go
@@ -64,6 +64,7 @@ import (
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"google.golang.org/grpc"
+ "google.golang.org/grpc/backoff"
"google.golang.org/grpc/health/grpc_health_v1"
)
@@ -110,11 +111,16 @@ func New(address string, opts ...ClientOpt) (*Client, error) {
c.services = *copts.services
}
if address != "" {
+ backoffConfig := backoff.DefaultConfig
+ backoffConfig.MaxDelay = 3 * time.Second
+ connParams := grpc.ConnectParams{
+ Backoff: backoffConfig,
+ }
gopts := []grpc.DialOption{
grpc.WithBlock(),
grpc.WithInsecure(),
grpc.FailOnNonTempDialError(true),
- grpc.WithBackoffMaxDelay(3 * time.Second),
+ grpc.WithConnectParams(connParams),
grpc.WithContextDialer(dialer.ContextDialer),
// TODO(stevvooe): We may need to allow configuration of this on the client.
@@ -390,7 +396,11 @@ func (c *Client) Fetch(ctx context.Context, ref string, opts ...RemoteOpt) (imag
}
defer done(ctx)
- return c.fetch(ctx, fetchCtx, ref, 0)
+ img, err := c.fetch(ctx, fetchCtx, ref, 0)
+ if err != nil {
+ return images.Image{}, err
+ }
+ return c.createNewImage(ctx, img)
}
// Push uploads the provided content to a remote resource
diff --git a/vendor/github.com/containerd/containerd/cmd/containerd/command/publish.go b/vendor/github.com/containerd/containerd/cmd/containerd/command/publish.go
index fda424b18..82e75f35e 100644
--- a/vendor/github.com/containerd/containerd/cmd/containerd/command/publish.go
+++ b/vendor/github.com/containerd/containerd/cmd/containerd/command/publish.go
@@ -32,6 +32,7 @@ import (
"github.com/pkg/errors"
"github.com/urfave/cli"
"google.golang.org/grpc"
+ "google.golang.org/grpc/backoff"
)
var publishCommand = cli.Command{
@@ -92,12 +93,17 @@ func connectEvents(address string) (eventsapi.EventsClient, error) {
}
func connect(address string, d func(gocontext.Context, string) (net.Conn, error)) (*grpc.ClientConn, error) {
+ backoffConfig := backoff.DefaultConfig
+ backoffConfig.MaxDelay = 3 * time.Second
+ connParams := grpc.ConnectParams{
+ Backoff: backoffConfig,
+ }
gopts := []grpc.DialOption{
grpc.WithBlock(),
grpc.WithInsecure(),
grpc.WithContextDialer(d),
grpc.FailOnNonTempDialError(true),
- grpc.WithBackoffMaxDelay(3 * time.Second),
+ grpc.WithConnectParams(connParams),
}
ctx, cancel := gocontext.WithTimeout(gocontext.Background(), 2*time.Second)
defer cancel()
diff --git a/vendor/github.com/containerd/containerd/container.go b/vendor/github.com/containerd/containerd/container.go
index fd880d0e0..187934ead 100644
--- a/vendor/github.com/containerd/containerd/container.go
+++ b/vendor/github.com/containerd/containerd/container.go
@@ -36,6 +36,7 @@ import (
prototypes "github.com/gogo/protobuf/types"
ver "github.com/opencontainers/image-spec/specs-go"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
)
@@ -242,7 +243,17 @@ func (c *container) NewTask(ctx context.Context, ioCreate cio.Creator, opts ...N
if err != nil {
return nil, err
}
+ spec, err := c.Spec(ctx)
+ if err != nil {
+ return nil, err
+ }
for _, m := range mounts {
+ if spec.Linux != nil && spec.Linux.MountLabel != "" {
+ context := label.FormatMountLabel("", spec.Linux.MountLabel)
+ if context != "" {
+ m.Options = append(m.Options, context)
+ }
+ }
request.Rootfs = append(request.Rootfs, &types.Mount{
Type: m.Type,
Source: m.Source,
diff --git a/vendor/github.com/containerd/containerd/metrics/cgroups/v2/io.go b/vendor/github.com/containerd/containerd/metrics/cgroups/v2/io.go
new file mode 100644
index 000000000..79980a580
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/metrics/cgroups/v2/io.go
@@ -0,0 +1,110 @@
+// +build linux
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package v2
+
+import (
+ "strconv"
+
+ v2 "github.com/containerd/containerd/metrics/types/v2"
+ metrics "github.com/docker/go-metrics"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var ioMetrics = []*metric{
+ {
+ name: "io_rbytes",
+ help: "IO bytes read",
+ unit: metrics.Bytes,
+ vt: prometheus.GaugeValue,
+ labels: []string{"major", "minor"},
+ getValues: func(stats *v2.Metrics) []value {
+ if stats.Io == nil {
+ return nil
+ }
+ var out []value
+ for _, e := range stats.Io.Usage {
+ out = append(out, value{
+ v: float64(e.Rbytes),
+ l: []string{strconv.FormatUint(e.Major, 10), strconv.FormatUint(e.Minor, 10)},
+ })
+ }
+ return out
+ },
+ },
+ {
+ name: "io_wbytes",
+ help: "IO bytes written",
+ unit: metrics.Bytes,
+ vt: prometheus.GaugeValue,
+ labels: []string{"major", "minor"},
+ getValues: func(stats *v2.Metrics) []value {
+ if stats.Io == nil {
+ return nil
+ }
+ var out []value
+ for _, e := range stats.Io.Usage {
+ out = append(out, value{
+ v: float64(e.Wbytes),
+ l: []string{strconv.FormatUint(e.Major, 10), strconv.FormatUint(e.Minor, 10)},
+ })
+ }
+ return out
+ },
+ },
+ {
+ name: "io_rios",
+ help: "Number of read IOs",
+ unit: metrics.Total,
+ vt: prometheus.GaugeValue,
+ labels: []string{"major", "minor"},
+ getValues: func(stats *v2.Metrics) []value {
+ if stats.Io == nil {
+ return nil
+ }
+ var out []value
+ for _, e := range stats.Io.Usage {
+ out = append(out, value{
+ v: float64(e.Rios),
+ l: []string{strconv.FormatUint(e.Major, 10), strconv.FormatUint(e.Minor, 10)},
+ })
+ }
+ return out
+ },
+ },
+ {
+ name: "io_wios",
+ help: "Number of write IOs",
+ unit: metrics.Total,
+ vt: prometheus.GaugeValue,
+ labels: []string{"major", "minor"},
+ getValues: func(stats *v2.Metrics) []value {
+ if stats.Io == nil {
+ return nil
+ }
+ var out []value
+ for _, e := range stats.Io.Usage {
+ out = append(out, value{
+ v: float64(e.Wios),
+ l: []string{strconv.FormatUint(e.Major, 10), strconv.FormatUint(e.Minor, 10)},
+ })
+ }
+ return out
+ },
+ },
+}
diff --git a/vendor/github.com/containerd/containerd/metrics/cgroups/v2/metrics.go b/vendor/github.com/containerd/containerd/metrics/cgroups/v2/metrics.go
index bfb6f0553..e352cd1fc 100644
--- a/vendor/github.com/containerd/containerd/metrics/cgroups/v2/metrics.go
+++ b/vendor/github.com/containerd/containerd/metrics/cgroups/v2/metrics.go
@@ -45,6 +45,7 @@ func newCollector(ns *metrics.Namespace) *collector {
c.metrics = append(c.metrics, pidMetrics...)
c.metrics = append(c.metrics, cpuMetrics...)
c.metrics = append(c.metrics, memoryMetrics...)
+ c.metrics = append(c.metrics, ioMetrics...)
c.storedMetrics = make(chan prometheus.Metric, 100*len(c.metrics))
ns.Add(c)
return c
diff --git a/vendor/github.com/containerd/containerd/metrics/types/v2/types.go b/vendor/github.com/containerd/containerd/metrics/types/v2/types.go
index da54076b5..eac8d4602 100644
--- a/vendor/github.com/containerd/containerd/metrics/types/v2/types.go
+++ b/vendor/github.com/containerd/containerd/metrics/types/v2/types.go
@@ -31,4 +31,6 @@ type (
CPUStat = v2.CPUStat
// PidsStat alias
PidsStat = v2.PidsStat
+ // IOStat alias
+ IOStat = v2.IOStat
)
diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts.go b/vendor/github.com/containerd/containerd/oci/spec_opts.go
index 4c14c5570..59dbfdb19 100644
--- a/vendor/github.com/containerd/containerd/oci/spec_opts.go
+++ b/vendor/github.com/containerd/containerd/oci/spec_opts.go
@@ -91,6 +91,21 @@ func setResources(s *Spec) {
}
}
+// nolint
+func setCPU(s *Spec) {
+ setResources(s)
+ if s.Linux != nil {
+ if s.Linux.Resources.CPU == nil {
+ s.Linux.Resources.CPU = &specs.LinuxCPU{}
+ }
+ }
+ if s.Windows != nil {
+ if s.Windows.Resources.CPU == nil {
+ s.Windows.Resources.CPU = &specs.WindowsCPUResources{}
+ }
+ }
+}
+
// setCapabilities sets Linux Capabilities to empty if unset
func setCapabilities(s *Spec) {
setProcess(s)
diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts_linux.go b/vendor/github.com/containerd/containerd/oci/spec_opts_linux.go
index 1448ee78f..b1eab6f9f 100644
--- a/vendor/github.com/containerd/containerd/oci/spec_opts_linux.go
+++ b/vendor/github.com/containerd/containerd/oci/spec_opts_linux.go
@@ -119,3 +119,64 @@ func deviceFromPath(path, permissions string) (*specs.LinuxDevice, error) {
GID: &stat.Gid,
}, nil
}
+
+// WithMemorySwap sets the container's swap in bytes
+func WithMemorySwap(swap int64) SpecOpts {
+ return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error {
+ setResources(s)
+ if s.Linux.Resources.Memory == nil {
+ s.Linux.Resources.Memory = &specs.LinuxMemory{}
+ }
+ s.Linux.Resources.Memory.Swap = &swap
+ return nil
+ }
+}
+
+// WithPidsLimit sets the container's pid limit or maximum
+func WithPidsLimit(limit int64) SpecOpts {
+ return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error {
+ setResources(s)
+ if s.Linux.Resources.Pids == nil {
+ s.Linux.Resources.Pids = &specs.LinuxPids{}
+ }
+ s.Linux.Resources.Pids.Limit = limit
+ return nil
+ }
+}
+
+// WithCPUShares sets the container's cpu shares
+func WithCPUShares(shares uint64) SpecOpts {
+ return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error {
+ setCPU(s)
+ s.Linux.Resources.CPU.Shares = &shares
+ return nil
+ }
+}
+
+// WithCPUs sets the container's cpus/cores for use by the container
+func WithCPUs(cpus string) SpecOpts {
+ return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error {
+ setCPU(s)
+ s.Linux.Resources.CPU.Cpus = cpus
+ return nil
+ }
+}
+
+// WithCPUsMems sets the container's cpu mems for use by the container
+func WithCPUsMems(mems string) SpecOpts {
+ return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error {
+ setCPU(s)
+ s.Linux.Resources.CPU.Mems = mems
+ return nil
+ }
+}
+
+// WithCPUCFS sets the container's Completely fair scheduling (CFS) quota and period
+func WithCPUCFS(quota int64, period uint64) SpecOpts {
+ return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error {
+ setCPU(s)
+ s.Linux.Resources.CPU.Quota = &quota
+ s.Linux.Resources.CPU.Period = &period
+ return nil
+ }
+}
diff --git a/vendor/github.com/containerd/containerd/pkg/dialer/dialer_windows.go b/vendor/github.com/containerd/containerd/pkg/dialer/dialer_windows.go
index 64d30dea0..4dd296ebc 100644
--- a/vendor/github.com/containerd/containerd/pkg/dialer/dialer_windows.go
+++ b/vendor/github.com/containerd/containerd/pkg/dialer/dialer_windows.go
@@ -19,21 +19,13 @@ package dialer
import (
"net"
"os"
- "syscall"
"time"
winio "github.com/Microsoft/go-winio"
)
func isNoent(err error) bool {
- if err != nil {
- if oerr, ok := err.(*os.PathError); ok {
- if oerr.Err == syscall.ENOENT {
- return true
- }
- }
- }
- return false
+ return os.IsNotExist(err)
}
func dialer(address string, timeout time.Duration) (net.Conn, error) {
diff --git a/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client.go b/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client.go
index ba3d51d51..5edb19dc3 100644
--- a/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client.go
+++ b/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client.go
@@ -21,6 +21,7 @@ import (
"time"
v1 "github.com/containerd/containerd/api/services/ttrpc/events/v1"
+ "github.com/containerd/containerd/pkg/dialer"
"github.com/containerd/ttrpc"
"github.com/pkg/errors"
)
@@ -40,7 +41,7 @@ type Client struct {
// NewClient returns a new containerd TTRPC client that is connected to the containerd instance provided by address
func NewClient(address string, opts ...ttrpc.ClientOpts) (*Client, error) {
connector := func() (*ttrpc.Client, error) {
- conn, err := ttrpcDial(address, ttrpcDialTimeout)
+ conn, err := dialer.Dialer(address, ttrpcDialTimeout)
if err != nil {
return nil, errors.Wrap(err, "failed to connect")
}
diff --git a/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client_unix.go b/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client_unix.go
deleted file mode 100644
index 16fb64954..000000000
--- a/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client_unix.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// +build !windows
-
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package ttrpcutil
-
-import (
- "net"
- "strings"
- "time"
-)
-
-func ttrpcDial(address string, timeout time.Duration) (net.Conn, error) {
- address = strings.TrimPrefix(address, "unix://")
- return net.DialTimeout("unix", address, timeout)
-}
diff --git a/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client_windows.go b/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client_windows.go
deleted file mode 100644
index b8c69ef0a..000000000
--- a/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client_windows.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// +build windows
-
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package ttrpcutil
-
-import (
- "context"
- "net"
- "os"
- "time"
-
- winio "github.com/Microsoft/go-winio"
- "github.com/pkg/errors"
-)
-
-func ttrpcDial(address string, timeout time.Duration) (net.Conn, error) {
- ctx, cancel := context.WithTimeout(context.Background(), timeout)
- defer cancel()
-
- // If there is nobody serving the pipe we limit the timeout for this case to
- // 5 seconds because any shim that would serve this endpoint should serve it
- // within 5 seconds.
- serveTimer := time.NewTimer(5 * time.Second)
- defer serveTimer.Stop()
- for {
- c, err := winio.DialPipeContext(ctx, address)
- if err != nil {
- if os.IsNotExist(err) {
- select {
- case <-serveTimer.C:
- return nil, errors.Wrap(os.ErrNotExist, "pipe not found before timeout")
- default:
- // Wait 10ms for the shim to serve and try again.
- time.Sleep(10 * time.Millisecond)
- continue
- }
- } else if err == context.DeadlineExceeded {
- return nil, errors.Wrapf(err, "timed out waiting for npipe %s", address)
- }
- return nil, err
- }
- return c, nil
- }
-}
diff --git a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go
index c4c9c9df9..ffa8970a8 100644
--- a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go
+++ b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go
@@ -98,7 +98,12 @@ func getCPUVariant() string {
switch strings.ToLower(variant) {
case "8", "aarch64":
- variant = "v8"
+ // special case: if running a 32-bit userspace on aarch64, the variant should be "v7"
+ if runtime.GOARCH == "arm" {
+ variant = "v7"
+ } else {
+ variant = "v8"
+ }
case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)":
variant = "v7"
case "6", "6tej":
diff --git a/vendor/github.com/containerd/containerd/pull.go b/vendor/github.com/containerd/containerd/pull.go
index 02d6f4e9b..c4cd919b5 100644
--- a/vendor/github.com/containerd/containerd/pull.go
+++ b/vendor/github.com/containerd/containerd/pull.go
@@ -27,6 +27,7 @@ import (
"github.com/containerd/containerd/remotes/docker/schema1"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
+ "golang.org/x/sync/errgroup"
"golang.org/x/sync/semaphore"
)
@@ -62,15 +63,18 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (_ Ima
defer done(ctx)
var unpacks int32
+ var unpackEg *errgroup.Group
+ var unpackWrapper func(f images.Handler) images.Handler
+
if pullCtx.Unpack {
// unpacker only supports schema 2 image, for schema 1 this is noop.
u, err := c.newUnpacker(ctx, pullCtx)
if err != nil {
return nil, errors.Wrap(err, "create unpacker")
}
- unpackWrapper, eg := u.handlerWrapper(ctx, &unpacks)
+ unpackWrapper, unpackEg = u.handlerWrapper(ctx, &unpacks)
defer func() {
- if err := eg.Wait(); err != nil {
+ if err := unpackEg.Wait(); err != nil {
if retErr == nil {
retErr = errors.Wrap(err, "unpack")
}
@@ -90,6 +94,22 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (_ Ima
return nil, err
}
+ // NOTE(fuweid): unpacker defers blobs download. before create image
+ // record in ImageService, should wait for unpacking(including blobs
+ // download).
+ if pullCtx.Unpack {
+ if unpackEg != nil {
+ if err := unpackEg.Wait(); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ img, err = c.createNewImage(ctx, img)
+ if err != nil {
+ return nil, err
+ }
+
i := NewImageWithPlatform(c, img, pullCtx.PlatformMatcher)
if pullCtx.Unpack {
@@ -201,12 +221,14 @@ func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, lim
}
}
- img := images.Image{
+ return images.Image{
Name: name,
Target: desc,
Labels: rCtx.Labels,
- }
+ }, nil
+}
+func (c *Client) createNewImage(ctx context.Context, img images.Image) (images.Image, error) {
is := c.ImageService()
for {
if created, err := is.Create(ctx, img); err != nil {
diff --git a/vendor/github.com/containerd/containerd/remotes/docker/pusher.go b/vendor/github.com/containerd/containerd/remotes/docker/pusher.go
index e87841bd4..96a45d390 100644
--- a/vendor/github.com/containerd/containerd/remotes/docker/pusher.go
+++ b/vendor/github.com/containerd/containerd/remotes/docker/pusher.go
@@ -204,6 +204,7 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten
q.Add("digest", desc.Digest.String())
req = p.request(lhost, http.MethodPut)
+ req.header.Set("Content-Type", "application/octet-stream")
req.path = lurl.Path + "?" + q.Encode()
}
p.tracker.SetStatus(ref, Status{
diff --git a/vendor/github.com/containerd/containerd/runtime/linux/runctypes/runc.pb.go b/vendor/github.com/containerd/containerd/runtime/linux/runctypes/runc.pb.go
index 96dfebe6b..26306e594 100644
--- a/vendor/github.com/containerd/containerd/runtime/linux/runctypes/runc.pb.go
+++ b/vendor/github.com/containerd/containerd/runtime/linux/runctypes/runc.pb.go
@@ -8,6 +8,7 @@ import (
proto "github.com/gogo/protobuf/proto"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
)
@@ -21,7 +22,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type RuncOptions struct {
Runtime string `protobuf:"bytes,1,opt,name=runtime,proto3" json:"runtime,omitempty"`
@@ -46,7 +47,7 @@ func (m *RuncOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_RuncOptions.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -97,7 +98,7 @@ func (m *CreateOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error
return xxx_messageInfo_CreateOptions.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -144,7 +145,7 @@ func (m *CheckpointOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, e
return xxx_messageInfo_CheckpointOptions.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -183,7 +184,7 @@ func (m *ProcessDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro
return xxx_messageInfo_ProcessDetails.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -258,7 +259,7 @@ var fileDescriptor_d20e2ba8b3cc58b9 = []byte{
func (m *RuncOptions) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -266,48 +267,57 @@ func (m *RuncOptions) Marshal() (dAtA []byte, err error) {
}
func (m *RuncOptions) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RuncOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Runtime) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintRunc(dAtA, i, uint64(len(m.Runtime)))
- i += copy(dAtA[i:], m.Runtime)
- }
- if len(m.RuntimeRoot) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintRunc(dAtA, i, uint64(len(m.RuntimeRoot)))
- i += copy(dAtA[i:], m.RuntimeRoot)
- }
- if len(m.CriuPath) > 0 {
- dAtA[i] = 0x1a
- i++
- i = encodeVarintRunc(dAtA, i, uint64(len(m.CriuPath)))
- i += copy(dAtA[i:], m.CriuPath)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if m.SystemdCgroup {
- dAtA[i] = 0x20
- i++
+ i--
if m.SystemdCgroup {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
+ i--
+ dAtA[i] = 0x20
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.CriuPath) > 0 {
+ i -= len(m.CriuPath)
+ copy(dAtA[i:], m.CriuPath)
+ i = encodeVarintRunc(dAtA, i, uint64(len(m.CriuPath)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.RuntimeRoot) > 0 {
+ i -= len(m.RuntimeRoot)
+ copy(dAtA[i:], m.RuntimeRoot)
+ i = encodeVarintRunc(dAtA, i, uint64(len(m.RuntimeRoot)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Runtime) > 0 {
+ i -= len(m.Runtime)
+ copy(dAtA[i:], m.Runtime)
+ i = encodeVarintRunc(dAtA, i, uint64(len(m.Runtime)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *CreateOptions) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -315,129 +325,133 @@ func (m *CreateOptions) Marshal() (dAtA []byte, err error) {
}
func (m *CreateOptions) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CreateOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if m.NoPivotRoot {
- dAtA[i] = 0x8
- i++
- if m.NoPivotRoot {
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.CriuImagePath) > 0 {
+ i -= len(m.CriuImagePath)
+ copy(dAtA[i:], m.CriuImagePath)
+ i = encodeVarintRunc(dAtA, i, uint64(len(m.CriuImagePath)))
+ i--
+ dAtA[i] = 0x6a
+ }
+ if len(m.CriuWorkPath) > 0 {
+ i -= len(m.CriuWorkPath)
+ copy(dAtA[i:], m.CriuWorkPath)
+ i = encodeVarintRunc(dAtA, i, uint64(len(m.CriuWorkPath)))
+ i--
+ dAtA[i] = 0x62
+ }
+ if m.IoGid != 0 {
+ i = encodeVarintRunc(dAtA, i, uint64(m.IoGid))
+ i--
+ dAtA[i] = 0x58
+ }
+ if m.IoUid != 0 {
+ i = encodeVarintRunc(dAtA, i, uint64(m.IoUid))
+ i--
+ dAtA[i] = 0x50
+ }
+ if len(m.ShimCgroup) > 0 {
+ i -= len(m.ShimCgroup)
+ copy(dAtA[i:], m.ShimCgroup)
+ i = encodeVarintRunc(dAtA, i, uint64(len(m.ShimCgroup)))
+ i--
+ dAtA[i] = 0x4a
+ }
+ if m.NoNewKeyring {
+ i--
+ if m.NoNewKeyring {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
+ i--
+ dAtA[i] = 0x40
}
- if m.OpenTcp {
- dAtA[i] = 0x10
- i++
- if m.OpenTcp {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
+ if len(m.CgroupsMode) > 0 {
+ i -= len(m.CgroupsMode)
+ copy(dAtA[i:], m.CgroupsMode)
+ i = encodeVarintRunc(dAtA, i, uint64(len(m.CgroupsMode)))
+ i--
+ dAtA[i] = 0x3a
+ }
+ if len(m.EmptyNamespaces) > 0 {
+ for iNdEx := len(m.EmptyNamespaces) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.EmptyNamespaces[iNdEx])
+ copy(dAtA[i:], m.EmptyNamespaces[iNdEx])
+ i = encodeVarintRunc(dAtA, i, uint64(len(m.EmptyNamespaces[iNdEx])))
+ i--
+ dAtA[i] = 0x32
}
- i++
}
- if m.ExternalUnixSockets {
- dAtA[i] = 0x18
- i++
- if m.ExternalUnixSockets {
+ if m.FileLocks {
+ i--
+ if m.FileLocks {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
+ i--
+ dAtA[i] = 0x28
}
if m.Terminal {
- dAtA[i] = 0x20
- i++
+ i--
if m.Terminal {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
+ i--
+ dAtA[i] = 0x20
}
- if m.FileLocks {
- dAtA[i] = 0x28
- i++
- if m.FileLocks {
+ if m.ExternalUnixSockets {
+ i--
+ if m.ExternalUnixSockets {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
+ i--
+ dAtA[i] = 0x18
}
- if len(m.EmptyNamespaces) > 0 {
- for _, s := range m.EmptyNamespaces {
- dAtA[i] = 0x32
- i++
- l = len(s)
- for l >= 1<<7 {
- dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
- l >>= 7
- i++
- }
- dAtA[i] = uint8(l)
- i++
- i += copy(dAtA[i:], s)
+ if m.OpenTcp {
+ i--
+ if m.OpenTcp {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
}
+ i--
+ dAtA[i] = 0x10
}
- if len(m.CgroupsMode) > 0 {
- dAtA[i] = 0x3a
- i++
- i = encodeVarintRunc(dAtA, i, uint64(len(m.CgroupsMode)))
- i += copy(dAtA[i:], m.CgroupsMode)
- }
- if m.NoNewKeyring {
- dAtA[i] = 0x40
- i++
- if m.NoNewKeyring {
+ if m.NoPivotRoot {
+ i--
+ if m.NoPivotRoot {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
- }
- if len(m.ShimCgroup) > 0 {
- dAtA[i] = 0x4a
- i++
- i = encodeVarintRunc(dAtA, i, uint64(len(m.ShimCgroup)))
- i += copy(dAtA[i:], m.ShimCgroup)
- }
- if m.IoUid != 0 {
- dAtA[i] = 0x50
- i++
- i = encodeVarintRunc(dAtA, i, uint64(m.IoUid))
- }
- if m.IoGid != 0 {
- dAtA[i] = 0x58
- i++
- i = encodeVarintRunc(dAtA, i, uint64(m.IoGid))
- }
- if len(m.CriuWorkPath) > 0 {
- dAtA[i] = 0x62
- i++
- i = encodeVarintRunc(dAtA, i, uint64(len(m.CriuWorkPath)))
- i += copy(dAtA[i:], m.CriuWorkPath)
- }
- if len(m.CriuImagePath) > 0 {
- dAtA[i] = 0x6a
- i++
- i = encodeVarintRunc(dAtA, i, uint64(len(m.CriuImagePath)))
- i += copy(dAtA[i:], m.CriuImagePath)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0x8
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *CheckpointOptions) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -445,103 +459,106 @@ func (m *CheckpointOptions) Marshal() (dAtA []byte, err error) {
}
func (m *CheckpointOptions) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CheckpointOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if m.Exit {
- dAtA[i] = 0x8
- i++
- if m.Exit {
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.ImagePath) > 0 {
+ i -= len(m.ImagePath)
+ copy(dAtA[i:], m.ImagePath)
+ i = encodeVarintRunc(dAtA, i, uint64(len(m.ImagePath)))
+ i--
+ dAtA[i] = 0x4a
+ }
+ if len(m.WorkPath) > 0 {
+ i -= len(m.WorkPath)
+ copy(dAtA[i:], m.WorkPath)
+ i = encodeVarintRunc(dAtA, i, uint64(len(m.WorkPath)))
+ i--
+ dAtA[i] = 0x42
+ }
+ if len(m.CgroupsMode) > 0 {
+ i -= len(m.CgroupsMode)
+ copy(dAtA[i:], m.CgroupsMode)
+ i = encodeVarintRunc(dAtA, i, uint64(len(m.CgroupsMode)))
+ i--
+ dAtA[i] = 0x3a
+ }
+ if len(m.EmptyNamespaces) > 0 {
+ for iNdEx := len(m.EmptyNamespaces) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.EmptyNamespaces[iNdEx])
+ copy(dAtA[i:], m.EmptyNamespaces[iNdEx])
+ i = encodeVarintRunc(dAtA, i, uint64(len(m.EmptyNamespaces[iNdEx])))
+ i--
+ dAtA[i] = 0x32
+ }
+ }
+ if m.FileLocks {
+ i--
+ if m.FileLocks {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
+ i--
+ dAtA[i] = 0x28
}
- if m.OpenTcp {
- dAtA[i] = 0x10
- i++
- if m.OpenTcp {
+ if m.Terminal {
+ i--
+ if m.Terminal {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
+ i--
+ dAtA[i] = 0x20
}
if m.ExternalUnixSockets {
- dAtA[i] = 0x18
- i++
+ i--
if m.ExternalUnixSockets {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
+ i--
+ dAtA[i] = 0x18
}
- if m.Terminal {
- dAtA[i] = 0x20
- i++
- if m.Terminal {
+ if m.OpenTcp {
+ i--
+ if m.OpenTcp {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
+ i--
+ dAtA[i] = 0x10
}
- if m.FileLocks {
- dAtA[i] = 0x28
- i++
- if m.FileLocks {
+ if m.Exit {
+ i--
+ if m.Exit {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
- }
- if len(m.EmptyNamespaces) > 0 {
- for _, s := range m.EmptyNamespaces {
- dAtA[i] = 0x32
- i++
- l = len(s)
- for l >= 1<<7 {
- dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
- l >>= 7
- i++
- }
- dAtA[i] = uint8(l)
- i++
- i += copy(dAtA[i:], s)
- }
- }
- if len(m.CgroupsMode) > 0 {
- dAtA[i] = 0x3a
- i++
- i = encodeVarintRunc(dAtA, i, uint64(len(m.CgroupsMode)))
- i += copy(dAtA[i:], m.CgroupsMode)
- }
- if len(m.WorkPath) > 0 {
- dAtA[i] = 0x42
- i++
- i = encodeVarintRunc(dAtA, i, uint64(len(m.WorkPath)))
- i += copy(dAtA[i:], m.WorkPath)
- }
- if len(m.ImagePath) > 0 {
- dAtA[i] = 0x4a
- i++
- i = encodeVarintRunc(dAtA, i, uint64(len(m.ImagePath)))
- i += copy(dAtA[i:], m.ImagePath)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0x8
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ProcessDetails) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -549,30 +566,39 @@ func (m *ProcessDetails) Marshal() (dAtA []byte, err error) {
}
func (m *ProcessDetails) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ProcessDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.ExecID) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.ExecID)
+ copy(dAtA[i:], m.ExecID)
i = encodeVarintRunc(dAtA, i, uint64(len(m.ExecID)))
- i += copy(dAtA[i:], m.ExecID)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func encodeVarintRunc(dAtA []byte, offset int, v uint64) int {
+ offset -= sovRunc(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func (m *RuncOptions) Size() (n int) {
if m == nil {
@@ -721,14 +747,7 @@ func (m *ProcessDetails) Size() (n int) {
}
func sovRunc(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozRunc(x uint64) (n int) {
return sovRunc(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -1721,6 +1740,7 @@ func (m *ProcessDetails) Unmarshal(dAtA []byte) error {
func skipRunc(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -1752,10 +1772,8 @@ func skipRunc(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -1776,55 +1794,30 @@ func skipRunc(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthRunc
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthRunc
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowRunc
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipRunc(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthRunc
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupRunc
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthRunc
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthRunc = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowRunc = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthRunc = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowRunc = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupRunc = fmt.Errorf("proto: unexpected end of group")
)
diff --git a/vendor/github.com/containerd/containerd/runtime/v1/linux/runtime.go b/vendor/github.com/containerd/containerd/runtime/v1/linux/runtime.go
index fdaff5f9e..b2101f326 100644
--- a/vendor/github.com/containerd/containerd/runtime/v1/linux/runtime.go
+++ b/vendor/github.com/containerd/containerd/runtime/v1/linux/runtime.go
@@ -62,6 +62,9 @@ const (
configFilename = "config.json"
defaultRuntime = "runc"
defaultShim = "containerd-shim"
+
+ // cleanupTimeout is default timeout for cleanup operations
+ cleanupTimeout = 1 * time.Minute
)
func init() {
@@ -212,7 +215,10 @@ func (r *Runtime) Create(ctx context.Context, id string, opts runtime.CreateOpts
}
defer func() {
if err != nil {
- if kerr := s.KillShim(ctx); kerr != nil {
+ deferCtx, deferCancel := context.WithTimeout(
+ namespaces.WithNamespace(context.TODO(), namespace), cleanupTimeout)
+ defer deferCancel()
+ if kerr := s.KillShim(deferCtx); kerr != nil {
log.G(ctx).WithError(err).Error("failed to kill shim")
}
}
diff --git a/vendor/github.com/containerd/containerd/runtime/v1/shim/client/client.go b/vendor/github.com/containerd/containerd/runtime/v1/shim/client/client.go
index 7c68248c5..116e84083 100644
--- a/vendor/github.com/containerd/containerd/runtime/v1/shim/client/client.go
+++ b/vendor/github.com/containerd/containerd/runtime/v1/shim/client/client.go
@@ -40,6 +40,7 @@ import (
"github.com/containerd/containerd/events"
"github.com/containerd/containerd/log"
+ "github.com/containerd/containerd/pkg/dialer"
v1 "github.com/containerd/containerd/runtime/v1"
"github.com/containerd/containerd/runtime/v1/shim"
shimapi "github.com/containerd/containerd/runtime/v1/shim/v1"
@@ -228,7 +229,7 @@ func connect(address string, d func(string, time.Duration) (net.Conn, error)) (n
func annonDialer(address string, timeout time.Duration) (net.Conn, error) {
address = strings.TrimPrefix(address, "unix://")
- return net.DialTimeout("unix", "\x00"+address, timeout)
+ return dialer.Dialer("\x00"+address, timeout)
}
// WithConnect connects to an existing shim
diff --git a/vendor/github.com/containerd/containerd/runtime/v1/shim/service.go b/vendor/github.com/containerd/containerd/runtime/v1/shim/service.go
index f3e1f4b7c..c0ec03931 100644
--- a/vendor/github.com/containerd/containerd/runtime/v1/shim/service.go
+++ b/vendor/github.com/containerd/containerd/runtime/v1/shim/service.go
@@ -521,13 +521,8 @@ func (s *Service) checkProcesses(e runc.Exit) {
}
if ip, ok := p.(*process.Init); ok {
- shouldKillAll, err := shouldKillAllOnExit(s.bundle)
- if err != nil {
- log.G(s.context).WithError(err).Error("failed to check shouldKillAll")
- }
-
// Ensure all children are killed
- if shouldKillAll {
+ if shouldKillAllOnExit(s.context, s.bundle) {
if err := ip.KillAll(s.context); err != nil {
log.G(s.context).WithError(err).WithField("id", ip.ID()).
Error("failed to kill init's children")
@@ -547,23 +542,25 @@ func (s *Service) checkProcesses(e runc.Exit) {
}
}
-func shouldKillAllOnExit(bundlePath string) (bool, error) {
+func shouldKillAllOnExit(ctx context.Context, bundlePath string) bool {
var bundleSpec specs.Spec
bundleConfigContents, err := ioutil.ReadFile(filepath.Join(bundlePath, "config.json"))
if err != nil {
- return false, err
+ log.G(ctx).WithError(err).Error("shouldKillAllOnExit: failed to read config.json")
+ return true
+ }
+ if err := json.Unmarshal(bundleConfigContents, &bundleSpec); err != nil {
+ log.G(ctx).WithError(err).Error("shouldKillAllOnExit: failed to unmarshal bundle json")
+ return true
}
- json.Unmarshal(bundleConfigContents, &bundleSpec)
-
if bundleSpec.Linux != nil {
for _, ns := range bundleSpec.Linux.Namespaces {
if ns.Type == specs.PIDNamespace && ns.Path == "" {
- return false, nil
+ return false
}
}
}
-
- return true, nil
+ return true
}
func (s *Service) getContainerPids(ctx context.Context, id string) ([]uint32, error) {
diff --git a/vendor/github.com/containerd/containerd/runtime/v1/shim/v1/shim.pb.go b/vendor/github.com/containerd/containerd/runtime/v1/shim/v1/shim.pb.go
index 7cc57803c..27f334966 100644
--- a/vendor/github.com/containerd/containerd/runtime/v1/shim/v1/shim.pb.go
+++ b/vendor/github.com/containerd/containerd/runtime/v1/shim/v1/shim.pb.go
@@ -14,6 +14,7 @@ import (
types1 "github.com/gogo/protobuf/types"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
time "time"
@@ -29,7 +30,7 @@ var _ = time.Kitchen
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type CreateTaskRequest struct {
ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
@@ -61,7 +62,7 @@ func (m *CreateTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, e
return xxx_messageInfo_CreateTaskRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -100,7 +101,7 @@ func (m *CreateTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte,
return xxx_messageInfo_CreateTaskResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -141,7 +142,7 @@ func (m *DeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro
return xxx_messageInfo_DeleteResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -180,7 +181,7 @@ func (m *DeleteProcessRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte
return xxx_messageInfo_DeleteProcessRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -224,7 +225,7 @@ func (m *ExecProcessRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte,
return xxx_messageInfo_ExecProcessRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -262,7 +263,7 @@ func (m *ExecProcessResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte,
return xxx_messageInfo_ExecProcessResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -303,7 +304,7 @@ func (m *ResizePtyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, er
return xxx_messageInfo_ResizePtyRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -342,7 +343,7 @@ func (m *StateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_StateRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -390,7 +391,7 @@ func (m *StateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error
return xxx_messageInfo_StateResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -431,7 +432,7 @@ func (m *KillRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_KillRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -471,7 +472,7 @@ func (m *CloseIORequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro
return xxx_messageInfo_CloseIORequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -510,7 +511,7 @@ func (m *ListPidsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, err
return xxx_messageInfo_ListPidsRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -549,7 +550,7 @@ func (m *ListPidsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, er
return xxx_messageInfo_ListPidsResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -589,7 +590,7 @@ func (m *CheckpointTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byt
return xxx_messageInfo_CheckpointTaskRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -628,7 +629,7 @@ func (m *ShimInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, er
return xxx_messageInfo_ShimInfoResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -667,7 +668,7 @@ func (m *UpdateTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, e
return xxx_messageInfo_UpdateTaskRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -706,7 +707,7 @@ func (m *StartRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_StartRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -746,7 +747,7 @@ func (m *StartResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error
return xxx_messageInfo_StartResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -785,7 +786,7 @@ func (m *WaitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_WaitRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -825,7 +826,7 @@ func (m *WaitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_WaitResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -949,7 +950,7 @@ var fileDescriptor_be1b2ef30ea3b8ef = []byte{
func (m *CreateTaskRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -957,100 +958,118 @@ func (m *CreateTaskRequest) Marshal() (dAtA []byte, err error) {
}
func (m *CreateTaskRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CreateTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
- }
- if len(m.Bundle) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.Bundle)))
- i += copy(dAtA[i:], m.Bundle)
- }
- if len(m.Runtime) > 0 {
- dAtA[i] = 0x1a
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.Runtime)))
- i += copy(dAtA[i:], m.Runtime)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- if len(m.Rootfs) > 0 {
- for _, msg := range m.Rootfs {
- dAtA[i] = 0x22
- i++
- i = encodeVarintShim(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
+ if m.Options != nil {
+ {
+ size, err := m.Options.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
- i += n
+ i -= size
+ i = encodeVarintShim(dAtA, i, uint64(size))
}
+ i--
+ dAtA[i] = 0x5a
+ }
+ if len(m.ParentCheckpoint) > 0 {
+ i -= len(m.ParentCheckpoint)
+ copy(dAtA[i:], m.ParentCheckpoint)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.ParentCheckpoint)))
+ i--
+ dAtA[i] = 0x52
+ }
+ if len(m.Checkpoint) > 0 {
+ i -= len(m.Checkpoint)
+ copy(dAtA[i:], m.Checkpoint)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.Checkpoint)))
+ i--
+ dAtA[i] = 0x4a
+ }
+ if len(m.Stderr) > 0 {
+ i -= len(m.Stderr)
+ copy(dAtA[i:], m.Stderr)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.Stderr)))
+ i--
+ dAtA[i] = 0x42
+ }
+ if len(m.Stdout) > 0 {
+ i -= len(m.Stdout)
+ copy(dAtA[i:], m.Stdout)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.Stdout)))
+ i--
+ dAtA[i] = 0x3a
+ }
+ if len(m.Stdin) > 0 {
+ i -= len(m.Stdin)
+ copy(dAtA[i:], m.Stdin)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.Stdin)))
+ i--
+ dAtA[i] = 0x32
}
if m.Terminal {
- dAtA[i] = 0x28
- i++
+ i--
if m.Terminal {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
- }
- if len(m.Stdin) > 0 {
- dAtA[i] = 0x32
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.Stdin)))
- i += copy(dAtA[i:], m.Stdin)
- }
- if len(m.Stdout) > 0 {
- dAtA[i] = 0x3a
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.Stdout)))
- i += copy(dAtA[i:], m.Stdout)
- }
- if len(m.Stderr) > 0 {
- dAtA[i] = 0x42
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.Stderr)))
- i += copy(dAtA[i:], m.Stderr)
+ i--
+ dAtA[i] = 0x28
}
- if len(m.Checkpoint) > 0 {
- dAtA[i] = 0x4a
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.Checkpoint)))
- i += copy(dAtA[i:], m.Checkpoint)
+ if len(m.Rootfs) > 0 {
+ for iNdEx := len(m.Rootfs) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Rootfs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintShim(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
}
- if len(m.ParentCheckpoint) > 0 {
- dAtA[i] = 0x52
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.ParentCheckpoint)))
- i += copy(dAtA[i:], m.ParentCheckpoint)
+ if len(m.Runtime) > 0 {
+ i -= len(m.Runtime)
+ copy(dAtA[i:], m.Runtime)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.Runtime)))
+ i--
+ dAtA[i] = 0x1a
}
- if m.Options != nil {
- dAtA[i] = 0x5a
- i++
- i = encodeVarintShim(dAtA, i, uint64(m.Options.Size()))
- n1, err := m.Options.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n1
+ if len(m.Bundle) > 0 {
+ i -= len(m.Bundle)
+ copy(dAtA[i:], m.Bundle)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.Bundle)))
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ID) > 0 {
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *CreateTaskResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1058,25 +1077,31 @@ func (m *CreateTaskResponse) Marshal() (dAtA []byte, err error) {
}
func (m *CreateTaskResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CreateTaskResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if m.Pid != 0 {
- dAtA[i] = 0x8
- i++
i = encodeVarintShim(dAtA, i, uint64(m.Pid))
+ i--
+ dAtA[i] = 0x8
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *DeleteResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1084,38 +1109,44 @@ func (m *DeleteResponse) Marshal() (dAtA []byte, err error) {
}
func (m *DeleteResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeleteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if m.Pid != 0 {
- dAtA[i] = 0x8
- i++
- i = encodeVarintShim(dAtA, i, uint64(m.Pid))
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- if m.ExitStatus != 0 {
- dAtA[i] = 0x10
- i++
- i = encodeVarintShim(dAtA, i, uint64(m.ExitStatus))
+ n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt):])
+ if err2 != nil {
+ return 0, err2
}
+ i -= n2
+ i = encodeVarintShim(dAtA, i, uint64(n2))
+ i--
dAtA[i] = 0x1a
- i++
- i = encodeVarintShim(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)))
- n2, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
- if err != nil {
- return 0, err
+ if m.ExitStatus != 0 {
+ i = encodeVarintShim(dAtA, i, uint64(m.ExitStatus))
+ i--
+ dAtA[i] = 0x10
}
- i += n2
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if m.Pid != 0 {
+ i = encodeVarintShim(dAtA, i, uint64(m.Pid))
+ i--
+ dAtA[i] = 0x8
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *DeleteProcessRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1123,26 +1154,33 @@ func (m *DeleteProcessRequest) Marshal() (dAtA []byte, err error) {
}
func (m *DeleteProcessRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeleteProcessRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ExecProcessRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1150,64 +1188,76 @@ func (m *ExecProcessRequest) Marshal() (dAtA []byte, err error) {
}
func (m *ExecProcessRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ExecProcessRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- if m.Terminal {
- dAtA[i] = 0x10
- i++
- if m.Terminal {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
+ if m.Spec != nil {
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintShim(dAtA, i, uint64(size))
}
- i++
+ i--
+ dAtA[i] = 0x32
}
- if len(m.Stdin) > 0 {
- dAtA[i] = 0x1a
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.Stdin)))
- i += copy(dAtA[i:], m.Stdin)
+ if len(m.Stderr) > 0 {
+ i -= len(m.Stderr)
+ copy(dAtA[i:], m.Stderr)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.Stderr)))
+ i--
+ dAtA[i] = 0x2a
}
if len(m.Stdout) > 0 {
- dAtA[i] = 0x22
- i++
+ i -= len(m.Stdout)
+ copy(dAtA[i:], m.Stdout)
i = encodeVarintShim(dAtA, i, uint64(len(m.Stdout)))
- i += copy(dAtA[i:], m.Stdout)
+ i--
+ dAtA[i] = 0x22
}
- if len(m.Stderr) > 0 {
- dAtA[i] = 0x2a
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.Stderr)))
- i += copy(dAtA[i:], m.Stderr)
+ if len(m.Stdin) > 0 {
+ i -= len(m.Stdin)
+ copy(dAtA[i:], m.Stdin)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.Stdin)))
+ i--
+ dAtA[i] = 0x1a
}
- if m.Spec != nil {
- dAtA[i] = 0x32
- i++
- i = encodeVarintShim(dAtA, i, uint64(m.Spec.Size()))
- n3, err := m.Spec.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ if m.Terminal {
+ i--
+ if m.Terminal {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
}
- i += n3
+ i--
+ dAtA[i] = 0x10
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ID) > 0 {
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ExecProcessResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1215,20 +1265,26 @@ func (m *ExecProcessResponse) Marshal() (dAtA []byte, err error) {
}
func (m *ExecProcessResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ExecProcessResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ResizePtyRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1236,36 +1292,43 @@ func (m *ResizePtyRequest) Marshal() (dAtA []byte, err error) {
}
func (m *ResizePtyRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResizePtyRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
- }
- if m.Width != 0 {
- dAtA[i] = 0x10
- i++
- i = encodeVarintShim(dAtA, i, uint64(m.Width))
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if m.Height != 0 {
- dAtA[i] = 0x18
- i++
i = encodeVarintShim(dAtA, i, uint64(m.Height))
+ i--
+ dAtA[i] = 0x18
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if m.Width != 0 {
+ i = encodeVarintShim(dAtA, i, uint64(m.Width))
+ i--
+ dAtA[i] = 0x10
}
- return i, nil
+ if len(m.ID) > 0 {
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
}
func (m *StateRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1273,26 +1336,33 @@ func (m *StateRequest) Marshal() (dAtA []byte, err error) {
}
func (m *StateRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *StateResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1300,83 +1370,94 @@ func (m *StateResponse) Marshal() (dAtA []byte, err error) {
}
func (m *StateResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
- }
- if len(m.Bundle) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.Bundle)))
- i += copy(dAtA[i:], m.Bundle)
- }
- if m.Pid != 0 {
- dAtA[i] = 0x18
- i++
- i = encodeVarintShim(dAtA, i, uint64(m.Pid))
- }
- if m.Status != 0 {
- dAtA[i] = 0x20
- i++
- i = encodeVarintShim(dAtA, i, uint64(m.Status))
- }
- if len(m.Stdin) > 0 {
- dAtA[i] = 0x2a
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.Stdin)))
- i += copy(dAtA[i:], m.Stdin)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- if len(m.Stdout) > 0 {
- dAtA[i] = 0x32
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.Stdout)))
- i += copy(dAtA[i:], m.Stdout)
+ n4, err4 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt):])
+ if err4 != nil {
+ return 0, err4
}
- if len(m.Stderr) > 0 {
- dAtA[i] = 0x3a
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.Stderr)))
- i += copy(dAtA[i:], m.Stderr)
+ i -= n4
+ i = encodeVarintShim(dAtA, i, uint64(n4))
+ i--
+ dAtA[i] = 0x52
+ if m.ExitStatus != 0 {
+ i = encodeVarintShim(dAtA, i, uint64(m.ExitStatus))
+ i--
+ dAtA[i] = 0x48
}
if m.Terminal {
- dAtA[i] = 0x40
- i++
+ i--
if m.Terminal {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
+ i--
+ dAtA[i] = 0x40
}
- if m.ExitStatus != 0 {
- dAtA[i] = 0x48
- i++
- i = encodeVarintShim(dAtA, i, uint64(m.ExitStatus))
+ if len(m.Stderr) > 0 {
+ i -= len(m.Stderr)
+ copy(dAtA[i:], m.Stderr)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.Stderr)))
+ i--
+ dAtA[i] = 0x3a
}
- dAtA[i] = 0x52
- i++
- i = encodeVarintShim(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)))
- n4, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
- if err != nil {
- return 0, err
+ if len(m.Stdout) > 0 {
+ i -= len(m.Stdout)
+ copy(dAtA[i:], m.Stdout)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.Stdout)))
+ i--
+ dAtA[i] = 0x32
}
- i += n4
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Stdin) > 0 {
+ i -= len(m.Stdin)
+ copy(dAtA[i:], m.Stdin)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.Stdin)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.Status != 0 {
+ i = encodeVarintShim(dAtA, i, uint64(m.Status))
+ i--
+ dAtA[i] = 0x20
+ }
+ if m.Pid != 0 {
+ i = encodeVarintShim(dAtA, i, uint64(m.Pid))
+ i--
+ dAtA[i] = 0x18
+ }
+ if len(m.Bundle) > 0 {
+ i -= len(m.Bundle)
+ copy(dAtA[i:], m.Bundle)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.Bundle)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ID) > 0 {
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *KillRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1384,41 +1465,48 @@ func (m *KillRequest) Marshal() (dAtA []byte, err error) {
}
func (m *KillRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KillRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
- }
- if m.Signal != 0 {
- dAtA[i] = 0x10
- i++
- i = encodeVarintShim(dAtA, i, uint64(m.Signal))
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if m.All {
- dAtA[i] = 0x18
- i++
+ i--
if m.All {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
+ i--
+ dAtA[i] = 0x18
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if m.Signal != 0 {
+ i = encodeVarintShim(dAtA, i, uint64(m.Signal))
+ i--
+ dAtA[i] = 0x10
}
- return i, nil
+ if len(m.ID) > 0 {
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
}
func (m *CloseIORequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1426,36 +1514,43 @@ func (m *CloseIORequest) Marshal() (dAtA []byte, err error) {
}
func (m *CloseIORequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CloseIORequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if m.Stdin {
- dAtA[i] = 0x10
- i++
+ i--
if m.Stdin {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
+ i--
+ dAtA[i] = 0x10
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ID) > 0 {
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ListPidsRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1463,26 +1558,33 @@ func (m *ListPidsRequest) Marshal() (dAtA []byte, err error) {
}
func (m *ListPidsRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ListPidsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ListPidsResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1490,32 +1592,40 @@ func (m *ListPidsResponse) Marshal() (dAtA []byte, err error) {
}
func (m *ListPidsResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ListPidsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Processes) > 0 {
- for _, msg := range m.Processes {
- dAtA[i] = 0xa
- i++
- i = encodeVarintShim(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ for iNdEx := len(m.Processes) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Processes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintShim(dAtA, i, uint64(size))
}
- i += n
+ i--
+ dAtA[i] = 0xa
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *CheckpointTaskRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1523,36 +1633,45 @@ func (m *CheckpointTaskRequest) Marshal() (dAtA []byte, err error) {
}
func (m *CheckpointTaskRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CheckpointTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Path) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.Path)))
- i += copy(dAtA[i:], m.Path)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if m.Options != nil {
- dAtA[i] = 0x12
- i++
- i = encodeVarintShim(dAtA, i, uint64(m.Options.Size()))
- n5, err := m.Options.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ {
+ size, err := m.Options.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintShim(dAtA, i, uint64(size))
}
- i += n5
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Path) > 0 {
+ i -= len(m.Path)
+ copy(dAtA[i:], m.Path)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.Path)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ShimInfoResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1560,25 +1679,31 @@ func (m *ShimInfoResponse) Marshal() (dAtA []byte, err error) {
}
func (m *ShimInfoResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ShimInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if m.ShimPid != 0 {
- dAtA[i] = 0x8
- i++
i = encodeVarintShim(dAtA, i, uint64(m.ShimPid))
+ i--
+ dAtA[i] = 0x8
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *UpdateTaskRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1586,30 +1711,38 @@ func (m *UpdateTaskRequest) Marshal() (dAtA []byte, err error) {
}
func (m *UpdateTaskRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UpdateTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if m.Resources != nil {
- dAtA[i] = 0xa
- i++
- i = encodeVarintShim(dAtA, i, uint64(m.Resources.Size()))
- n6, err := m.Resources.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ {
+ size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintShim(dAtA, i, uint64(size))
}
- i += n6
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *StartRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1617,26 +1750,33 @@ func (m *StartRequest) Marshal() (dAtA []byte, err error) {
}
func (m *StartRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StartRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *StartResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1644,31 +1784,38 @@ func (m *StartResponse) Marshal() (dAtA []byte, err error) {
}
func (m *StartResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StartResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if m.Pid != 0 {
- dAtA[i] = 0x10
- i++
i = encodeVarintShim(dAtA, i, uint64(m.Pid))
+ i--
+ dAtA[i] = 0x10
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ID) > 0 {
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *WaitRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1676,26 +1823,33 @@ func (m *WaitRequest) Marshal() (dAtA []byte, err error) {
}
func (m *WaitRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WaitRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *WaitResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1703,37 +1857,45 @@ func (m *WaitResponse) Marshal() (dAtA []byte, err error) {
}
func (m *WaitResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WaitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if m.ExitStatus != 0 {
- dAtA[i] = 0x8
- i++
- i = encodeVarintShim(dAtA, i, uint64(m.ExitStatus))
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- dAtA[i] = 0x12
- i++
- i = encodeVarintShim(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)))
- n7, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
- if err != nil {
- return 0, err
+ n7, err7 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt):])
+ if err7 != nil {
+ return 0, err7
}
- i += n7
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i -= n7
+ i = encodeVarintShim(dAtA, i, uint64(n7))
+ i--
+ dAtA[i] = 0x12
+ if m.ExitStatus != 0 {
+ i = encodeVarintShim(dAtA, i, uint64(m.ExitStatus))
+ i--
+ dAtA[i] = 0x8
}
- return i, nil
+ return len(dAtA) - i, nil
}
func encodeVarintShim(dAtA []byte, offset int, v uint64) int {
+ offset -= sovShim(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func (m *CreateTaskRequest) Size() (n int) {
if m == nil {
@@ -2169,14 +2331,7 @@ func (m *WaitResponse) Size() (n int) {
}
func sovShim(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozShim(x uint64) (n int) {
return sovShim(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -2185,11 +2340,16 @@ func (this *CreateTaskRequest) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForRootfs := "[]*Mount{"
+ for _, f := range this.Rootfs {
+ repeatedStringForRootfs += strings.Replace(fmt.Sprintf("%v", f), "Mount", "types.Mount", 1) + ","
+ }
+ repeatedStringForRootfs += "}"
s := strings.Join([]string{`&CreateTaskRequest{`,
`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
`Bundle:` + fmt.Sprintf("%v", this.Bundle) + `,`,
`Runtime:` + fmt.Sprintf("%v", this.Runtime) + `,`,
- `Rootfs:` + strings.Replace(fmt.Sprintf("%v", this.Rootfs), "Mount", "types.Mount", 1) + `,`,
+ `Rootfs:` + repeatedStringForRootfs + `,`,
`Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`,
`Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`,
`Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`,
@@ -2220,7 +2380,7 @@ func (this *DeleteResponse) String() string {
s := strings.Join([]string{`&DeleteResponse{`,
`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
- `ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
+ `ExitedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExitedAt), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -2301,7 +2461,7 @@ func (this *StateResponse) String() string {
`Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`,
`Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`,
`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
- `ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
+ `ExitedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExitedAt), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -2347,8 +2507,13 @@ func (this *ListPidsResponse) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForProcesses := "[]*ProcessInfo{"
+ for _, f := range this.Processes {
+ repeatedStringForProcesses += strings.Replace(fmt.Sprintf("%v", f), "ProcessInfo", "task.ProcessInfo", 1) + ","
+ }
+ repeatedStringForProcesses += "}"
s := strings.Join([]string{`&ListPidsResponse{`,
- `Processes:` + strings.Replace(fmt.Sprintf("%v", this.Processes), "ProcessInfo", "task.ProcessInfo", 1) + `,`,
+ `Processes:` + repeatedStringForProcesses + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -2428,7 +2593,7 @@ func (this *WaitResponse) String() string {
}
s := strings.Join([]string{`&WaitResponse{`,
`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
- `ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
+ `ExitedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExitedAt), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -5302,6 +5467,7 @@ func (m *WaitResponse) Unmarshal(dAtA []byte) error {
func skipShim(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -5333,10 +5499,8 @@ func skipShim(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -5357,55 +5521,30 @@ func skipShim(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthShim
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthShim
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowShim
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipShim(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthShim
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupShim
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthShim
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthShim = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowShim = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthShim = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowShim = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupShim = fmt.Errorf("proto: unexpected end of group")
)
diff --git a/vendor/github.com/containerd/containerd/runtime/v2/binary.go b/vendor/github.com/containerd/containerd/runtime/v2/binary.go
index 30c72827d..4b3f4ab93 100644
--- a/vendor/github.com/containerd/containerd/runtime/v2/binary.go
+++ b/vendor/github.com/containerd/containerd/runtime/v2/binary.go
@@ -77,7 +77,13 @@ func (b *binary) Start(ctx context.Context, opts *types.Any, onClose func()) (_
}
// Windows needs a namespace when openShimLog
ns, _ := namespaces.Namespace(ctx)
- f, err := openShimLog(namespaces.WithNamespace(context.Background(), ns), b.bundle, client.AnonDialer)
+ shimCtx, cancelShimLog := context.WithCancel(namespaces.WithNamespace(context.Background(), ns))
+ defer func() {
+ if err != nil {
+ cancelShimLog()
+ }
+ }()
+ f, err := openShimLog(shimCtx, b.bundle, client.AnonDialer)
if err != nil {
return nil, errors.Wrap(err, "open shim log pipe")
}
@@ -106,7 +112,11 @@ func (b *binary) Start(ctx context.Context, opts *types.Any, onClose func()) (_
if err != nil {
return nil, err
}
- client := ttrpc.NewClient(conn, ttrpc.WithOnClose(onClose))
+ onCloseWithShimLog := func() {
+ onClose()
+ cancelShimLog()
+ }
+ client := ttrpc.NewClient(conn, ttrpc.WithOnClose(onCloseWithShimLog))
return &shim{
bundle: b.bundle,
client: client,
diff --git a/vendor/github.com/containerd/containerd/runtime/v2/runc/options/oci.pb.go b/vendor/github.com/containerd/containerd/runtime/v2/runc/options/oci.pb.go
index a24dbad71..f298452b6 100644
--- a/vendor/github.com/containerd/containerd/runtime/v2/runc/options/oci.pb.go
+++ b/vendor/github.com/containerd/containerd/runtime/v2/runc/options/oci.pb.go
@@ -8,6 +8,7 @@ import (
proto "github.com/gogo/protobuf/proto"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
)
@@ -21,7 +22,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type Options struct {
// disable pivot root when creating a container
@@ -64,7 +65,7 @@ func (m *Options) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Options.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -120,7 +121,7 @@ func (m *CheckpointOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, e
return xxx_messageInfo_CheckpointOptions.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -160,7 +161,7 @@ func (m *ProcessDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro
return xxx_messageInfo_ProcessDetails.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -233,7 +234,7 @@ var fileDescriptor_4e5440d739e9a863 = []byte{
func (m *Options) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -241,96 +242,108 @@ func (m *Options) Marshal() (dAtA []byte, err error) {
}
func (m *Options) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Options) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if m.NoPivotRoot {
- dAtA[i] = 0x8
- i++
- if m.NoPivotRoot {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i++
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- if m.NoNewKeyring {
- dAtA[i] = 0x10
- i++
- if m.NoNewKeyring {
+ if len(m.CriuWorkPath) > 0 {
+ i -= len(m.CriuWorkPath)
+ copy(dAtA[i:], m.CriuWorkPath)
+ i = encodeVarintOci(dAtA, i, uint64(len(m.CriuWorkPath)))
+ i--
+ dAtA[i] = 0x5a
+ }
+ if len(m.CriuImagePath) > 0 {
+ i -= len(m.CriuImagePath)
+ copy(dAtA[i:], m.CriuImagePath)
+ i = encodeVarintOci(dAtA, i, uint64(len(m.CriuImagePath)))
+ i--
+ dAtA[i] = 0x52
+ }
+ if m.SystemdCgroup {
+ i--
+ if m.SystemdCgroup {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
- }
- if len(m.ShimCgroup) > 0 {
- dAtA[i] = 0x1a
- i++
- i = encodeVarintOci(dAtA, i, uint64(len(m.ShimCgroup)))
- i += copy(dAtA[i:], m.ShimCgroup)
+ i--
+ dAtA[i] = 0x48
}
- if m.IoUid != 0 {
- dAtA[i] = 0x20
- i++
- i = encodeVarintOci(dAtA, i, uint64(m.IoUid))
+ if len(m.CriuPath) > 0 {
+ i -= len(m.CriuPath)
+ copy(dAtA[i:], m.CriuPath)
+ i = encodeVarintOci(dAtA, i, uint64(len(m.CriuPath)))
+ i--
+ dAtA[i] = 0x42
}
- if m.IoGid != 0 {
- dAtA[i] = 0x28
- i++
- i = encodeVarintOci(dAtA, i, uint64(m.IoGid))
+ if len(m.Root) > 0 {
+ i -= len(m.Root)
+ copy(dAtA[i:], m.Root)
+ i = encodeVarintOci(dAtA, i, uint64(len(m.Root)))
+ i--
+ dAtA[i] = 0x3a
}
if len(m.BinaryName) > 0 {
- dAtA[i] = 0x32
- i++
+ i -= len(m.BinaryName)
+ copy(dAtA[i:], m.BinaryName)
i = encodeVarintOci(dAtA, i, uint64(len(m.BinaryName)))
- i += copy(dAtA[i:], m.BinaryName)
+ i--
+ dAtA[i] = 0x32
}
- if len(m.Root) > 0 {
- dAtA[i] = 0x3a
- i++
- i = encodeVarintOci(dAtA, i, uint64(len(m.Root)))
- i += copy(dAtA[i:], m.Root)
+ if m.IoGid != 0 {
+ i = encodeVarintOci(dAtA, i, uint64(m.IoGid))
+ i--
+ dAtA[i] = 0x28
}
- if len(m.CriuPath) > 0 {
- dAtA[i] = 0x42
- i++
- i = encodeVarintOci(dAtA, i, uint64(len(m.CriuPath)))
- i += copy(dAtA[i:], m.CriuPath)
+ if m.IoUid != 0 {
+ i = encodeVarintOci(dAtA, i, uint64(m.IoUid))
+ i--
+ dAtA[i] = 0x20
}
- if m.SystemdCgroup {
- dAtA[i] = 0x48
- i++
- if m.SystemdCgroup {
+ if len(m.ShimCgroup) > 0 {
+ i -= len(m.ShimCgroup)
+ copy(dAtA[i:], m.ShimCgroup)
+ i = encodeVarintOci(dAtA, i, uint64(len(m.ShimCgroup)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.NoNewKeyring {
+ i--
+ if m.NoNewKeyring {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
- }
- if len(m.CriuImagePath) > 0 {
- dAtA[i] = 0x52
- i++
- i = encodeVarintOci(dAtA, i, uint64(len(m.CriuImagePath)))
- i += copy(dAtA[i:], m.CriuImagePath)
- }
- if len(m.CriuWorkPath) > 0 {
- dAtA[i] = 0x5a
- i++
- i = encodeVarintOci(dAtA, i, uint64(len(m.CriuWorkPath)))
- i += copy(dAtA[i:], m.CriuWorkPath)
+ i--
+ dAtA[i] = 0x10
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if m.NoPivotRoot {
+ i--
+ if m.NoPivotRoot {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *CheckpointOptions) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -338,103 +351,106 @@ func (m *CheckpointOptions) Marshal() (dAtA []byte, err error) {
}
func (m *CheckpointOptions) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CheckpointOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if m.Exit {
- dAtA[i] = 0x8
- i++
- if m.Exit {
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.WorkPath) > 0 {
+ i -= len(m.WorkPath)
+ copy(dAtA[i:], m.WorkPath)
+ i = encodeVarintOci(dAtA, i, uint64(len(m.WorkPath)))
+ i--
+ dAtA[i] = 0x4a
+ }
+ if len(m.ImagePath) > 0 {
+ i -= len(m.ImagePath)
+ copy(dAtA[i:], m.ImagePath)
+ i = encodeVarintOci(dAtA, i, uint64(len(m.ImagePath)))
+ i--
+ dAtA[i] = 0x42
+ }
+ if len(m.CgroupsMode) > 0 {
+ i -= len(m.CgroupsMode)
+ copy(dAtA[i:], m.CgroupsMode)
+ i = encodeVarintOci(dAtA, i, uint64(len(m.CgroupsMode)))
+ i--
+ dAtA[i] = 0x3a
+ }
+ if len(m.EmptyNamespaces) > 0 {
+ for iNdEx := len(m.EmptyNamespaces) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.EmptyNamespaces[iNdEx])
+ copy(dAtA[i:], m.EmptyNamespaces[iNdEx])
+ i = encodeVarintOci(dAtA, i, uint64(len(m.EmptyNamespaces[iNdEx])))
+ i--
+ dAtA[i] = 0x32
+ }
+ }
+ if m.FileLocks {
+ i--
+ if m.FileLocks {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
+ i--
+ dAtA[i] = 0x28
}
- if m.OpenTcp {
- dAtA[i] = 0x10
- i++
- if m.OpenTcp {
+ if m.Terminal {
+ i--
+ if m.Terminal {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
+ i--
+ dAtA[i] = 0x20
}
if m.ExternalUnixSockets {
- dAtA[i] = 0x18
- i++
+ i--
if m.ExternalUnixSockets {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
+ i--
+ dAtA[i] = 0x18
}
- if m.Terminal {
- dAtA[i] = 0x20
- i++
- if m.Terminal {
+ if m.OpenTcp {
+ i--
+ if m.OpenTcp {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
+ i--
+ dAtA[i] = 0x10
}
- if m.FileLocks {
- dAtA[i] = 0x28
- i++
- if m.FileLocks {
+ if m.Exit {
+ i--
+ if m.Exit {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
- }
- if len(m.EmptyNamespaces) > 0 {
- for _, s := range m.EmptyNamespaces {
- dAtA[i] = 0x32
- i++
- l = len(s)
- for l >= 1<<7 {
- dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
- l >>= 7
- i++
- }
- dAtA[i] = uint8(l)
- i++
- i += copy(dAtA[i:], s)
- }
- }
- if len(m.CgroupsMode) > 0 {
- dAtA[i] = 0x3a
- i++
- i = encodeVarintOci(dAtA, i, uint64(len(m.CgroupsMode)))
- i += copy(dAtA[i:], m.CgroupsMode)
- }
- if len(m.ImagePath) > 0 {
- dAtA[i] = 0x42
- i++
- i = encodeVarintOci(dAtA, i, uint64(len(m.ImagePath)))
- i += copy(dAtA[i:], m.ImagePath)
- }
- if len(m.WorkPath) > 0 {
- dAtA[i] = 0x4a
- i++
- i = encodeVarintOci(dAtA, i, uint64(len(m.WorkPath)))
- i += copy(dAtA[i:], m.WorkPath)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0x8
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ProcessDetails) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -442,30 +458,39 @@ func (m *ProcessDetails) Marshal() (dAtA []byte, err error) {
}
func (m *ProcessDetails) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ProcessDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.ExecID) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.ExecID)
+ copy(dAtA[i:], m.ExecID)
i = encodeVarintOci(dAtA, i, uint64(len(m.ExecID)))
- i += copy(dAtA[i:], m.ExecID)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func encodeVarintOci(dAtA []byte, offset int, v uint64) int {
+ offset -= sovOci(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func (m *Options) Size() (n int) {
if m == nil {
@@ -580,14 +605,7 @@ func (m *ProcessDetails) Size() (n int) {
}
func sovOci(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozOci(x uint64) (n int) {
return sovOci(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -1366,6 +1384,7 @@ func (m *ProcessDetails) Unmarshal(dAtA []byte) error {
func skipOci(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -1397,10 +1416,8 @@ func skipOci(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -1421,55 +1438,30 @@ func skipOci(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthOci
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthOci
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowOci
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipOci(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthOci
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupOci
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthOci
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthOci = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowOci = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthOci = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowOci = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupOci = fmt.Errorf("proto: unexpected end of group")
)
diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go
index 7ca65033f..093a66239 100644
--- a/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go
+++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go
@@ -30,6 +30,7 @@ import (
"time"
"github.com/containerd/containerd/namespaces"
+ "github.com/containerd/containerd/pkg/dialer"
"github.com/containerd/containerd/sys"
"github.com/pkg/errors"
)
@@ -75,7 +76,7 @@ func SocketAddress(ctx context.Context, id string) (string, error) {
// AnonDialer returns a dialer for an abstract socket
func AnonDialer(address string, timeout time.Duration) (net.Conn, error) {
address = strings.TrimPrefix(address, "unix://")
- return net.DialTimeout("unix", "\x00"+address, timeout)
+ return dialer.Dialer("\x00"+address, timeout)
}
func AnonReconnectDialer(address string, timeout time.Duration) (net.Conn, error) {
diff --git a/vendor/github.com/containerd/containerd/runtime/v2/task/shim.pb.go b/vendor/github.com/containerd/containerd/runtime/v2/task/shim.pb.go
index 7ba9e677a..3cf11d8e3 100644
--- a/vendor/github.com/containerd/containerd/runtime/v2/task/shim.pb.go
+++ b/vendor/github.com/containerd/containerd/runtime/v2/task/shim.pb.go
@@ -14,6 +14,7 @@ import (
types1 "github.com/gogo/protobuf/types"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
time "time"
@@ -29,7 +30,7 @@ var _ = time.Kitchen
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type CreateTaskRequest struct {
ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
@@ -60,7 +61,7 @@ func (m *CreateTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, e
return xxx_messageInfo_CreateTaskRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -99,7 +100,7 @@ func (m *CreateTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte,
return xxx_messageInfo_CreateTaskResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -139,7 +140,7 @@ func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error
return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -180,7 +181,7 @@ func (m *DeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro
return xxx_messageInfo_DeleteResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -225,7 +226,7 @@ func (m *ExecProcessRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte,
return xxx_messageInfo_ExecProcessRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -263,7 +264,7 @@ func (m *ExecProcessResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte,
return xxx_messageInfo_ExecProcessResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -305,7 +306,7 @@ func (m *ResizePtyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, er
return xxx_messageInfo_ResizePtyRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -345,7 +346,7 @@ func (m *StateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_StateRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -394,7 +395,7 @@ func (m *StateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error
return xxx_messageInfo_StateResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -436,7 +437,7 @@ func (m *KillRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_KillRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -477,7 +478,7 @@ func (m *CloseIORequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro
return xxx_messageInfo_CloseIORequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -516,7 +517,7 @@ func (m *PidsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_PidsRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -555,7 +556,7 @@ func (m *PidsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_PidsResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -596,7 +597,7 @@ func (m *CheckpointTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byt
return xxx_messageInfo_CheckpointTaskRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -636,7 +637,7 @@ func (m *UpdateTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, e
return xxx_messageInfo_UpdateTaskRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -676,7 +677,7 @@ func (m *StartRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_StartRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -715,7 +716,7 @@ func (m *StartResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error
return xxx_messageInfo_StartResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -755,7 +756,7 @@ func (m *WaitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_WaitRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -795,7 +796,7 @@ func (m *WaitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_WaitResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -834,7 +835,7 @@ func (m *StatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_StatsRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -873,7 +874,7 @@ func (m *StatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error
return xxx_messageInfo_StatsResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -912,7 +913,7 @@ func (m *ConnectRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro
return xxx_messageInfo_ConnectRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -953,7 +954,7 @@ func (m *ConnectResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, err
return xxx_messageInfo_ConnectResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -993,7 +994,7 @@ func (m *ShutdownRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, err
return xxx_messageInfo_ShutdownRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -1032,7 +1033,7 @@ func (m *PauseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_PauseRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -1071,7 +1072,7 @@ func (m *ResumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error
return xxx_messageInfo_ResumeRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -1208,7 +1209,7 @@ var fileDescriptor_9202ee34bc3ad8ca = []byte{
func (m *CreateTaskRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1216,94 +1217,111 @@ func (m *CreateTaskRequest) Marshal() (dAtA []byte, err error) {
}
func (m *CreateTaskRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CreateTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
- }
- if len(m.Bundle) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.Bundle)))
- i += copy(dAtA[i:], m.Bundle)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- if len(m.Rootfs) > 0 {
- for _, msg := range m.Rootfs {
- dAtA[i] = 0x1a
- i++
- i = encodeVarintShim(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
+ if m.Options != nil {
+ {
+ size, err := m.Options.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
- i += n
+ i -= size
+ i = encodeVarintShim(dAtA, i, uint64(size))
}
+ i--
+ dAtA[i] = 0x52
}
- if m.Terminal {
- dAtA[i] = 0x20
- i++
- if m.Terminal {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i++
- }
- if len(m.Stdin) > 0 {
- dAtA[i] = 0x2a
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.Stdin)))
- i += copy(dAtA[i:], m.Stdin)
+ if len(m.ParentCheckpoint) > 0 {
+ i -= len(m.ParentCheckpoint)
+ copy(dAtA[i:], m.ParentCheckpoint)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.ParentCheckpoint)))
+ i--
+ dAtA[i] = 0x4a
}
- if len(m.Stdout) > 0 {
- dAtA[i] = 0x32
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.Stdout)))
- i += copy(dAtA[i:], m.Stdout)
+ if len(m.Checkpoint) > 0 {
+ i -= len(m.Checkpoint)
+ copy(dAtA[i:], m.Checkpoint)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.Checkpoint)))
+ i--
+ dAtA[i] = 0x42
}
if len(m.Stderr) > 0 {
- dAtA[i] = 0x3a
- i++
+ i -= len(m.Stderr)
+ copy(dAtA[i:], m.Stderr)
i = encodeVarintShim(dAtA, i, uint64(len(m.Stderr)))
- i += copy(dAtA[i:], m.Stderr)
+ i--
+ dAtA[i] = 0x3a
}
- if len(m.Checkpoint) > 0 {
- dAtA[i] = 0x42
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.Checkpoint)))
- i += copy(dAtA[i:], m.Checkpoint)
+ if len(m.Stdout) > 0 {
+ i -= len(m.Stdout)
+ copy(dAtA[i:], m.Stdout)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.Stdout)))
+ i--
+ dAtA[i] = 0x32
}
- if len(m.ParentCheckpoint) > 0 {
- dAtA[i] = 0x4a
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.ParentCheckpoint)))
- i += copy(dAtA[i:], m.ParentCheckpoint)
+ if len(m.Stdin) > 0 {
+ i -= len(m.Stdin)
+ copy(dAtA[i:], m.Stdin)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.Stdin)))
+ i--
+ dAtA[i] = 0x2a
}
- if m.Options != nil {
- dAtA[i] = 0x52
- i++
- i = encodeVarintShim(dAtA, i, uint64(m.Options.Size()))
- n1, err := m.Options.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ if m.Terminal {
+ i--
+ if m.Terminal {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
}
- i += n1
+ i--
+ dAtA[i] = 0x20
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Rootfs) > 0 {
+ for iNdEx := len(m.Rootfs) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Rootfs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintShim(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.Bundle) > 0 {
+ i -= len(m.Bundle)
+ copy(dAtA[i:], m.Bundle)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.Bundle)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ID) > 0 {
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *CreateTaskResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1311,25 +1329,31 @@ func (m *CreateTaskResponse) Marshal() (dAtA []byte, err error) {
}
func (m *CreateTaskResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CreateTaskResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if m.Pid != 0 {
- dAtA[i] = 0x8
- i++
i = encodeVarintShim(dAtA, i, uint64(m.Pid))
+ i--
+ dAtA[i] = 0x8
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *DeleteRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1337,32 +1361,40 @@ func (m *DeleteRequest) Marshal() (dAtA []byte, err error) {
}
func (m *DeleteRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeleteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.ExecID) > 0 {
- dAtA[i] = 0x12
- i++
+ i -= len(m.ExecID)
+ copy(dAtA[i:], m.ExecID)
i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID)))
- i += copy(dAtA[i:], m.ExecID)
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ID) > 0 {
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *DeleteResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1370,38 +1402,44 @@ func (m *DeleteResponse) Marshal() (dAtA []byte, err error) {
}
func (m *DeleteResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeleteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if m.Pid != 0 {
- dAtA[i] = 0x8
- i++
- i = encodeVarintShim(dAtA, i, uint64(m.Pid))
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- if m.ExitStatus != 0 {
- dAtA[i] = 0x10
- i++
- i = encodeVarintShim(dAtA, i, uint64(m.ExitStatus))
+ n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt):])
+ if err2 != nil {
+ return 0, err2
}
+ i -= n2
+ i = encodeVarintShim(dAtA, i, uint64(n2))
+ i--
dAtA[i] = 0x1a
- i++
- i = encodeVarintShim(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)))
- n2, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
- if err != nil {
- return 0, err
+ if m.ExitStatus != 0 {
+ i = encodeVarintShim(dAtA, i, uint64(m.ExitStatus))
+ i--
+ dAtA[i] = 0x10
}
- i += n2
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if m.Pid != 0 {
+ i = encodeVarintShim(dAtA, i, uint64(m.Pid))
+ i--
+ dAtA[i] = 0x8
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ExecProcessRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1409,70 +1447,83 @@ func (m *ExecProcessRequest) Marshal() (dAtA []byte, err error) {
}
func (m *ExecProcessRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ExecProcessRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
- }
- if len(m.ExecID) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID)))
- i += copy(dAtA[i:], m.ExecID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- if m.Terminal {
- dAtA[i] = 0x18
- i++
- if m.Terminal {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
+ if m.Spec != nil {
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintShim(dAtA, i, uint64(size))
}
- i++
+ i--
+ dAtA[i] = 0x3a
}
- if len(m.Stdin) > 0 {
- dAtA[i] = 0x22
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.Stdin)))
- i += copy(dAtA[i:], m.Stdin)
+ if len(m.Stderr) > 0 {
+ i -= len(m.Stderr)
+ copy(dAtA[i:], m.Stderr)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.Stderr)))
+ i--
+ dAtA[i] = 0x32
}
if len(m.Stdout) > 0 {
- dAtA[i] = 0x2a
- i++
+ i -= len(m.Stdout)
+ copy(dAtA[i:], m.Stdout)
i = encodeVarintShim(dAtA, i, uint64(len(m.Stdout)))
- i += copy(dAtA[i:], m.Stdout)
+ i--
+ dAtA[i] = 0x2a
}
- if len(m.Stderr) > 0 {
- dAtA[i] = 0x32
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.Stderr)))
- i += copy(dAtA[i:], m.Stderr)
+ if len(m.Stdin) > 0 {
+ i -= len(m.Stdin)
+ copy(dAtA[i:], m.Stdin)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.Stdin)))
+ i--
+ dAtA[i] = 0x22
}
- if m.Spec != nil {
- dAtA[i] = 0x3a
- i++
- i = encodeVarintShim(dAtA, i, uint64(m.Spec.Size()))
- n3, err := m.Spec.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ if m.Terminal {
+ i--
+ if m.Terminal {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
}
- i += n3
+ i--
+ dAtA[i] = 0x18
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ExecID) > 0 {
+ i -= len(m.ExecID)
+ copy(dAtA[i:], m.ExecID)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID)))
+ i--
+ dAtA[i] = 0x12
}
- return i, nil
+ if len(m.ID) > 0 {
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
}
func (m *ExecProcessResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1480,20 +1531,26 @@ func (m *ExecProcessResponse) Marshal() (dAtA []byte, err error) {
}
func (m *ExecProcessResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ExecProcessResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ResizePtyRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1501,42 +1558,50 @@ func (m *ResizePtyRequest) Marshal() (dAtA []byte, err error) {
}
func (m *ResizePtyRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResizePtyRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- if len(m.ExecID) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID)))
- i += copy(dAtA[i:], m.ExecID)
+ if m.Height != 0 {
+ i = encodeVarintShim(dAtA, i, uint64(m.Height))
+ i--
+ dAtA[i] = 0x20
}
if m.Width != 0 {
- dAtA[i] = 0x18
- i++
i = encodeVarintShim(dAtA, i, uint64(m.Width))
+ i--
+ dAtA[i] = 0x18
}
- if m.Height != 0 {
- dAtA[i] = 0x20
- i++
- i = encodeVarintShim(dAtA, i, uint64(m.Height))
+ if len(m.ExecID) > 0 {
+ i -= len(m.ExecID)
+ copy(dAtA[i:], m.ExecID)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID)))
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ID) > 0 {
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *StateRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1544,32 +1609,40 @@ func (m *StateRequest) Marshal() (dAtA []byte, err error) {
}
func (m *StateRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.ExecID) > 0 {
- dAtA[i] = 0x12
- i++
+ i -= len(m.ExecID)
+ copy(dAtA[i:], m.ExecID)
i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID)))
- i += copy(dAtA[i:], m.ExecID)
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ID) > 0 {
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *StateResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1577,89 +1650,101 @@ func (m *StateResponse) Marshal() (dAtA []byte, err error) {
}
func (m *StateResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
- }
- if len(m.Bundle) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.Bundle)))
- i += copy(dAtA[i:], m.Bundle)
- }
- if m.Pid != 0 {
- dAtA[i] = 0x18
- i++
- i = encodeVarintShim(dAtA, i, uint64(m.Pid))
- }
- if m.Status != 0 {
- dAtA[i] = 0x20
- i++
- i = encodeVarintShim(dAtA, i, uint64(m.Status))
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- if len(m.Stdin) > 0 {
- dAtA[i] = 0x2a
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.Stdin)))
- i += copy(dAtA[i:], m.Stdin)
+ if len(m.ExecID) > 0 {
+ i -= len(m.ExecID)
+ copy(dAtA[i:], m.ExecID)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID)))
+ i--
+ dAtA[i] = 0x5a
}
- if len(m.Stdout) > 0 {
- dAtA[i] = 0x32
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.Stdout)))
- i += copy(dAtA[i:], m.Stdout)
+ n4, err4 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt):])
+ if err4 != nil {
+ return 0, err4
}
- if len(m.Stderr) > 0 {
- dAtA[i] = 0x3a
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.Stderr)))
- i += copy(dAtA[i:], m.Stderr)
+ i -= n4
+ i = encodeVarintShim(dAtA, i, uint64(n4))
+ i--
+ dAtA[i] = 0x52
+ if m.ExitStatus != 0 {
+ i = encodeVarintShim(dAtA, i, uint64(m.ExitStatus))
+ i--
+ dAtA[i] = 0x48
}
if m.Terminal {
- dAtA[i] = 0x40
- i++
+ i--
if m.Terminal {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
+ i--
+ dAtA[i] = 0x40
}
- if m.ExitStatus != 0 {
- dAtA[i] = 0x48
- i++
- i = encodeVarintShim(dAtA, i, uint64(m.ExitStatus))
+ if len(m.Stderr) > 0 {
+ i -= len(m.Stderr)
+ copy(dAtA[i:], m.Stderr)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.Stderr)))
+ i--
+ dAtA[i] = 0x3a
}
- dAtA[i] = 0x52
- i++
- i = encodeVarintShim(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)))
- n4, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
- if err != nil {
- return 0, err
+ if len(m.Stdout) > 0 {
+ i -= len(m.Stdout)
+ copy(dAtA[i:], m.Stdout)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.Stdout)))
+ i--
+ dAtA[i] = 0x32
}
- i += n4
- if len(m.ExecID) > 0 {
- dAtA[i] = 0x5a
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID)))
- i += copy(dAtA[i:], m.ExecID)
+ if len(m.Stdin) > 0 {
+ i -= len(m.Stdin)
+ copy(dAtA[i:], m.Stdin)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.Stdin)))
+ i--
+ dAtA[i] = 0x2a
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if m.Status != 0 {
+ i = encodeVarintShim(dAtA, i, uint64(m.Status))
+ i--
+ dAtA[i] = 0x20
+ }
+ if m.Pid != 0 {
+ i = encodeVarintShim(dAtA, i, uint64(m.Pid))
+ i--
+ dAtA[i] = 0x18
+ }
+ if len(m.Bundle) > 0 {
+ i -= len(m.Bundle)
+ copy(dAtA[i:], m.Bundle)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.Bundle)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ID) > 0 {
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *KillRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1667,47 +1752,55 @@ func (m *KillRequest) Marshal() (dAtA []byte, err error) {
}
func (m *KillRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KillRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
- }
- if len(m.ExecID) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID)))
- i += copy(dAtA[i:], m.ExecID)
- }
- if m.Signal != 0 {
- dAtA[i] = 0x18
- i++
- i = encodeVarintShim(dAtA, i, uint64(m.Signal))
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if m.All {
- dAtA[i] = 0x20
- i++
+ i--
if m.All {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
+ i--
+ dAtA[i] = 0x20
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if m.Signal != 0 {
+ i = encodeVarintShim(dAtA, i, uint64(m.Signal))
+ i--
+ dAtA[i] = 0x18
+ }
+ if len(m.ExecID) > 0 {
+ i -= len(m.ExecID)
+ copy(dAtA[i:], m.ExecID)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ID) > 0 {
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *CloseIORequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1715,42 +1808,50 @@ func (m *CloseIORequest) Marshal() (dAtA []byte, err error) {
}
func (m *CloseIORequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CloseIORequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
- }
- if len(m.ExecID) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID)))
- i += copy(dAtA[i:], m.ExecID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if m.Stdin {
- dAtA[i] = 0x18
- i++
+ i--
if m.Stdin {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
+ i--
+ dAtA[i] = 0x18
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ExecID) > 0 {
+ i -= len(m.ExecID)
+ copy(dAtA[i:], m.ExecID)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ID) > 0 {
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *PidsRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1758,26 +1859,33 @@ func (m *PidsRequest) Marshal() (dAtA []byte, err error) {
}
func (m *PidsRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PidsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *PidsResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1785,32 +1893,40 @@ func (m *PidsResponse) Marshal() (dAtA []byte, err error) {
}
func (m *PidsResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PidsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Processes) > 0 {
- for _, msg := range m.Processes {
- dAtA[i] = 0xa
- i++
- i = encodeVarintShim(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ for iNdEx := len(m.Processes) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Processes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintShim(dAtA, i, uint64(size))
}
- i += n
+ i--
+ dAtA[i] = 0xa
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *CheckpointTaskRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1818,42 +1934,52 @@ func (m *CheckpointTaskRequest) Marshal() (dAtA []byte, err error) {
}
func (m *CheckpointTaskRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CheckpointTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
- }
- if len(m.Path) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.Path)))
- i += copy(dAtA[i:], m.Path)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if m.Options != nil {
- dAtA[i] = 0x1a
- i++
- i = encodeVarintShim(dAtA, i, uint64(m.Options.Size()))
- n5, err := m.Options.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ {
+ size, err := m.Options.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintShim(dAtA, i, uint64(size))
}
- i += n5
+ i--
+ dAtA[i] = 0x1a
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Path) > 0 {
+ i -= len(m.Path)
+ copy(dAtA[i:], m.Path)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.Path)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ID) > 0 {
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *UpdateTaskRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1861,36 +1987,45 @@ func (m *UpdateTaskRequest) Marshal() (dAtA []byte, err error) {
}
func (m *UpdateTaskRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UpdateTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if m.Resources != nil {
- dAtA[i] = 0x12
- i++
- i = encodeVarintShim(dAtA, i, uint64(m.Resources.Size()))
- n6, err := m.Resources.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ {
+ size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintShim(dAtA, i, uint64(size))
}
- i += n6
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ID) > 0 {
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *StartRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1898,32 +2033,40 @@ func (m *StartRequest) Marshal() (dAtA []byte, err error) {
}
func (m *StartRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StartRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.ExecID) > 0 {
- dAtA[i] = 0x12
- i++
+ i -= len(m.ExecID)
+ copy(dAtA[i:], m.ExecID)
i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID)))
- i += copy(dAtA[i:], m.ExecID)
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ID) > 0 {
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *StartResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1931,25 +2074,31 @@ func (m *StartResponse) Marshal() (dAtA []byte, err error) {
}
func (m *StartResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StartResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if m.Pid != 0 {
- dAtA[i] = 0x8
- i++
i = encodeVarintShim(dAtA, i, uint64(m.Pid))
+ i--
+ dAtA[i] = 0x8
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *WaitRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1957,32 +2106,40 @@ func (m *WaitRequest) Marshal() (dAtA []byte, err error) {
}
func (m *WaitRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WaitRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.ExecID) > 0 {
- dAtA[i] = 0x12
- i++
+ i -= len(m.ExecID)
+ copy(dAtA[i:], m.ExecID)
i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID)))
- i += copy(dAtA[i:], m.ExecID)
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ID) > 0 {
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *WaitResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1990,33 +2147,39 @@ func (m *WaitResponse) Marshal() (dAtA []byte, err error) {
}
func (m *WaitResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WaitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if m.ExitStatus != 0 {
- dAtA[i] = 0x8
- i++
- i = encodeVarintShim(dAtA, i, uint64(m.ExitStatus))
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- dAtA[i] = 0x12
- i++
- i = encodeVarintShim(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)))
- n7, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
- if err != nil {
- return 0, err
+ n7, err7 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt):])
+ if err7 != nil {
+ return 0, err7
}
- i += n7
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i -= n7
+ i = encodeVarintShim(dAtA, i, uint64(n7))
+ i--
+ dAtA[i] = 0x12
+ if m.ExitStatus != 0 {
+ i = encodeVarintShim(dAtA, i, uint64(m.ExitStatus))
+ i--
+ dAtA[i] = 0x8
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *StatsRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2024,26 +2187,33 @@ func (m *StatsRequest) Marshal() (dAtA []byte, err error) {
}
func (m *StatsRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StatsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *StatsResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2051,30 +2221,38 @@ func (m *StatsResponse) Marshal() (dAtA []byte, err error) {
}
func (m *StatsResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StatsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if m.Stats != nil {
- dAtA[i] = 0xa
- i++
- i = encodeVarintShim(dAtA, i, uint64(m.Stats.Size()))
- n8, err := m.Stats.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ {
+ size, err := m.Stats.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintShim(dAtA, i, uint64(size))
}
- i += n8
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ConnectRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2082,26 +2260,33 @@ func (m *ConnectRequest) Marshal() (dAtA []byte, err error) {
}
func (m *ConnectRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConnectRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ConnectResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2109,36 +2294,43 @@ func (m *ConnectResponse) Marshal() (dAtA []byte, err error) {
}
func (m *ConnectResponse) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConnectResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if m.ShimPid != 0 {
- dAtA[i] = 0x8
- i++
- i = encodeVarintShim(dAtA, i, uint64(m.ShimPid))
- }
- if m.TaskPid != 0 {
- dAtA[i] = 0x10
- i++
- i = encodeVarintShim(dAtA, i, uint64(m.TaskPid))
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Version) > 0 {
- dAtA[i] = 0x1a
- i++
+ i -= len(m.Version)
+ copy(dAtA[i:], m.Version)
i = encodeVarintShim(dAtA, i, uint64(len(m.Version)))
- i += copy(dAtA[i:], m.Version)
+ i--
+ dAtA[i] = 0x1a
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if m.TaskPid != 0 {
+ i = encodeVarintShim(dAtA, i, uint64(m.TaskPid))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.ShimPid != 0 {
+ i = encodeVarintShim(dAtA, i, uint64(m.ShimPid))
+ i--
+ dAtA[i] = 0x8
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ShutdownRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2146,36 +2338,43 @@ func (m *ShutdownRequest) Marshal() (dAtA []byte, err error) {
}
func (m *ShutdownRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ShutdownRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if m.Now {
- dAtA[i] = 0x10
- i++
+ i--
if m.Now {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
+ i--
+ dAtA[i] = 0x10
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ID) > 0 {
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *PauseRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2183,26 +2382,33 @@ func (m *PauseRequest) Marshal() (dAtA []byte, err error) {
}
func (m *PauseRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PauseRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ResumeRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2210,30 +2416,39 @@ func (m *ResumeRequest) Marshal() (dAtA []byte, err error) {
}
func (m *ResumeRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResumeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func encodeVarintShim(dAtA []byte, offset int, v uint64) int {
+ offset -= sovShim(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func (m *CreateTaskRequest) Size() (n int) {
if m == nil {
@@ -2811,14 +3026,7 @@ func (m *ResumeRequest) Size() (n int) {
}
func sovShim(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozShim(x uint64) (n int) {
return sovShim(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -2827,10 +3035,15 @@ func (this *CreateTaskRequest) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForRootfs := "[]*Mount{"
+ for _, f := range this.Rootfs {
+ repeatedStringForRootfs += strings.Replace(fmt.Sprintf("%v", f), "Mount", "types.Mount", 1) + ","
+ }
+ repeatedStringForRootfs += "}"
s := strings.Join([]string{`&CreateTaskRequest{`,
`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
`Bundle:` + fmt.Sprintf("%v", this.Bundle) + `,`,
- `Rootfs:` + strings.Replace(fmt.Sprintf("%v", this.Rootfs), "Mount", "types.Mount", 1) + `,`,
+ `Rootfs:` + repeatedStringForRootfs + `,`,
`Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`,
`Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`,
`Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`,
@@ -2873,7 +3086,7 @@ func (this *DeleteResponse) String() string {
s := strings.Join([]string{`&DeleteResponse{`,
`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
- `ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
+ `ExitedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExitedAt), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -2946,7 +3159,7 @@ func (this *StateResponse) String() string {
`Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`,
`Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`,
`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
- `ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
+ `ExitedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExitedAt), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
`ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
@@ -2995,8 +3208,13 @@ func (this *PidsResponse) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForProcesses := "[]*ProcessInfo{"
+ for _, f := range this.Processes {
+ repeatedStringForProcesses += strings.Replace(fmt.Sprintf("%v", f), "ProcessInfo", "task.ProcessInfo", 1) + ","
+ }
+ repeatedStringForProcesses += "}"
s := strings.Join([]string{`&PidsResponse{`,
- `Processes:` + strings.Replace(fmt.Sprintf("%v", this.Processes), "ProcessInfo", "task.ProcessInfo", 1) + `,`,
+ `Processes:` + repeatedStringForProcesses + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -3068,7 +3286,7 @@ func (this *WaitResponse) String() string {
}
s := strings.Join([]string{`&WaitResponse{`,
`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
- `ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
+ `ExitedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExitedAt), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -6917,6 +7135,7 @@ func (m *ResumeRequest) Unmarshal(dAtA []byte) error {
func skipShim(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -6948,10 +7167,8 @@ func skipShim(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -6972,55 +7189,30 @@ func skipShim(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthShim
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthShim
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowShim
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipShim(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthShim
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupShim
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthShim
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthShim = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowShim = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthShim = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowShim = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupShim = fmt.Errorf("proto: unexpected end of group")
)
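
The regenerated marshalers above (gogo/protobuf v1.3.1) fill the output buffer from the end toward the front: `Marshal` sizes the message once, `MarshalToSizedBuffer` writes the last field first, `encodeVarintShim` now reserves `sovShim(v)` bytes before the current offset instead of advancing past it, and `skipShim` tracks group nesting with a `depth` counter instead of recursing. A minimal, self-contained sketch of that tail-first pattern follows; the message and helper names are illustrative, not part of the generated file.

```go
// Illustrative sketch (not part of the generated file) of the tail-first
// marshaling scheme used by the regenerated code above.
package main

import (
	"fmt"
	"math/bits"
)

// sov reports the varint-encoded size of x, mirroring the new sovShim.
func sov(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

// encodeVarint reserves sov(v) bytes ending at offset and writes v there,
// returning the new (smaller) offset, as the updated encodeVarintShim does.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

// marshalID encodes a single string field with tag 1 (0xa), working backwards
// from the end of a pre-sized buffer; it returns the number of bytes written.
func marshalID(id string, dAtA []byte) int {
	i := len(dAtA)
	if len(id) > 0 {
		i -= len(id)
		copy(dAtA[i:], id)
		i = encodeVarint(dAtA, i, uint64(len(id)))
		i--
		dAtA[i] = 0xa
	}
	return len(dAtA) - i
}

func main() {
	id := "task-1"
	buf := make([]byte, 1+sov(uint64(len(id)))+len(id))
	n := marshalID(id, buf)
	fmt.Printf("wrote %d bytes: % x\n", n, buf)
}
```

Writing fields in reverse lets the generated code take the length prefix of nested messages (`Spec`, `Options`, `ExitedAt`) from the bytes actually written, rather than from a separate `Size()` call per field.
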
diff --git a/vendor/github.com/containerd/containerd/services/server/server.go b/vendor/github.com/containerd/containerd/services/server/server.go
index e31fec5df..129425382 100644
--- a/vendor/github.com/containerd/containerd/services/server/server.go
+++ b/vendor/github.com/containerd/containerd/services/server/server.go
@@ -52,6 +52,7 @@ import (
"github.com/pkg/errors"
bolt "go.etcd.io/bbolt"
"google.golang.org/grpc"
+ "google.golang.org/grpc/backoff"
"google.golang.org/grpc/credentials"
)
@@ -456,9 +457,14 @@ func (pc *proxyClients) getClient(address string) (*grpc.ClientConn, error) {
return c, nil
}
+ backoffConfig := backoff.DefaultConfig
+ backoffConfig.MaxDelay = 3 * time.Second
+ connParams := grpc.ConnectParams{
+ Backoff: backoffConfig,
+ }
gopts := []grpc.DialOption{
grpc.WithInsecure(),
- grpc.WithBackoffMaxDelay(3 * time.Second),
+ grpc.WithConnectParams(connParams),
grpc.WithContextDialer(dialer.ContextDialer),
// TODO(stevvooe): We may need to allow configuration of this on the client.
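
For reference, the change above swaps the deprecated `grpc.WithBackoffMaxDelay` for `grpc.WithConnectParams`, which gRPC v1.27 expects. A standalone sketch of the equivalent dial helper, with the address as a placeholder and unrelated options elided:

```go
// Sketch of the grpc v1.27-style backoff configuration used above; the
// address is a placeholder and only the options relevant to the change appear.
package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
)

func dialProxy(address string) (*grpc.ClientConn, error) {
	backoffConfig := backoff.DefaultConfig
	backoffConfig.MaxDelay = 3 * time.Second // same ceiling the old WithBackoffMaxDelay(3 * time.Second) set

	return grpc.Dial(address,
		grpc.WithInsecure(),
		grpc.WithConnectParams(grpc.ConnectParams{Backoff: backoffConfig}),
	)
}
```
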
diff --git a/vendor/github.com/containerd/containerd/snapshots/windows/windows.go b/vendor/github.com/containerd/containerd/snapshots/windows/windows.go
index 95cf8ffaa..bf144f19b 100644
--- a/vendor/github.com/containerd/containerd/snapshots/windows/windows.go
+++ b/vendor/github.com/containerd/containerd/snapshots/windows/windows.go
@@ -23,6 +23,7 @@ import (
"encoding/json"
"os"
"path/filepath"
+ "strconv"
"strings"
winfs "github.com/Microsoft/go-winio/pkg/fs"
@@ -48,6 +49,10 @@ func init() {
})
}
+const (
+ rootfsSizeLabel = "containerd.io/snapshot/io.microsoft.container.storage.rootfs.size-gb"
+)
+
type snapshotter struct {
root string
info hcsshim.DriverInfo
@@ -332,7 +337,26 @@ func (s *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k
return nil, errors.Wrap(err, "failed to create sandbox layer")
}
- // TODO(darrenstahlmsft): Allow changing sandbox size
+ var snapshotInfo snapshots.Info
+ for _, o := range opts {
+ o(&snapshotInfo)
+ }
+
+ var sizeGB int
+ if sizeGBstr, ok := snapshotInfo.Labels[rootfsSizeLabel]; ok {
+ i32, err := strconv.ParseInt(sizeGBstr, 10, 32)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to parse annotation %q=%q", rootfsSizeLabel, sizeGBstr)
+ }
+ sizeGB = int(i32)
+ }
+
+ if sizeGB > 0 {
+ const gbToByte = 1024 * 1024 * 1024
+ if err := hcsshim.ExpandSandboxSize(s.info, newSnapshot.ID, uint64(gbToByte*sizeGB)); err != nil {
+ return nil, errors.Wrapf(err, "failed to expand scratch size to %d GB", sizeGB)
+ }
+ }
}
if err := t.Commit(); err != nil {
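
The snapshotter change above reads an optional size label and, when present, expands the scratch sandbox via `hcsshim.ExpandSandboxSize`. A hedged, caller-side sketch of supplying that label through the containerd snapshot API follows; the snapshotter name, key, parent, and the 50 GB value are placeholders, and `snapshots.WithLabels` is assumed to be the path by which the label reaches `createSnapshot`.

```go
// Hypothetical caller-side sketch: request a larger scratch disk by attaching
// the size label that the Windows snapshotter now parses. Names and values
// here are placeholders, not taken from the patch.
package main

import (
	"context"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/snapshots"
)

const rootfsSizeLabel = "containerd.io/snapshot/io.microsoft.container.storage.rootfs.size-gb"

func prepareLargeScratch(ctx context.Context, client *containerd.Client, key, parent string) error {
	sn := client.SnapshotService("windows")
	_, err := sn.Prepare(ctx, key, parent,
		snapshots.WithLabels(map[string]string{
			rootfsSizeLabel: "50", // ask for a 50 GB sandbox instead of the default
		}))
	return err
}
```
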
diff --git a/vendor/github.com/containerd/containerd/vendor.conf b/vendor/github.com/containerd/containerd/vendor.conf
index ecedabd8f..f7af5cc55 100644
--- a/vendor/github.com/containerd/containerd/vendor.conf
+++ b/vendor/github.com/containerd/containerd/vendor.conf
@@ -1,6 +1,7 @@
-github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
+github.com/beorn7/perks 37c8de3658fcb183f997c4e13e8337516ab753e6 # v1.0.1
github.com/BurntSushi/toml 3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005 # v0.3.1
-github.com/containerd/btrfs af5082808c833de0e79c1e72eea9fea239364877
+github.com/cespare/xxhash/v2 d7df74196a9e781ede915320c11c378c1b2f3a1f # v2.1.1
+github.com/containerd/btrfs 153935315f4ab9be5bf03650a1341454b05efa5d
github.com/containerd/cgroups 7347743e5d1e8500d9f27c8e748e689ed991d92b
github.com/containerd/console 8375c3424e4d7b114e8a90a4a40c8e1b40d1d4e6
github.com/containerd/continuity 0ec596719c75bfd42908850990acea594b7593ac
@@ -11,15 +12,15 @@ github.com/containerd/typeurl a93fcdb778cd272c6e9b3028b2f4
github.com/coreos/go-systemd/v22 2d78030078ef61b3cae27f42ad6d0e46db51b339 # v22.0.0
github.com/cpuguy83/go-md2man 7762f7e404f8416dfa1d9bb6a8c192aa9acb4d19 # v1.0.10
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
-github.com/docker/go-metrics 4ea375f7759c82740c893fc030bc37088d2ec098
+github.com/docker/go-metrics b619b3592b65de4f087d9f16863a7e6ff905973c # v0.0.1
github.com/docker/go-units 519db1ee28dcc9fd2474ae59fca29a810482bfb1 # v0.4.0
github.com/godbus/dbus/v5 37bf87eef99d69c4f1d3528bd66e3a87dc201472 # v5.0.3
-github.com/gogo/googleapis d31c731455cb061f42baff3bda55bad0118b126b # v1.2.0
-github.com/gogo/protobuf ba06b47c162d49f2af050fb4c75bcbc86a159d5c # v1.2.1
-github.com/golang/protobuf aa810b61a9c79d51363740d207bb46cf8e620ed5 # v1.2.0
+github.com/gogo/googleapis 01e0f9cca9b92166042241267ee2a5cdf5cff46c # v1.3.2
+github.com/gogo/protobuf 5628607bb4c51c3157aacc3a50f0ab707582b805 # v1.3.1
+github.com/golang/protobuf d23c5127dc24889085f8ccea5c9d560a57a879d8 # v1.3.3
github.com/google/go-cmp 3af367b6b30c263d47e8895973edcca9a49cf029 # v0.2.0
github.com/google/uuid 0cd6bf5da1e1c83f8b45653022c74f71af0538a4 # v1.1.1
-github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
+github.com/grpc-ecosystem/go-grpc-prometheus c225b8c3b01faf2899099b768856a9e916e5087b # v1.2.0
github.com/hashicorp/errwrap 8a6fb523712970c966eefc6b39ed2c5e74880354 # v1.0.0
github.com/hashicorp/go-multierror 886a7fbe3eb1c874d46f623bfa70af45f425b3d1 # v1.0.0
github.com/hashicorp/golang-lru 7f827b33c0f158ec5dfbba01bb0b14a4541fd81d # v0.5.3
@@ -27,16 +28,16 @@ github.com/imdario/mergo 7c29201646fa3de8506f70121347
github.com/konsorten/go-windows-terminal-sequences 5c8c8bd35d3832f5d134ae1e1e375b69a4d25242 # v1.0.1
github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c # v1.0.1
github.com/Microsoft/go-winio 6c72808b55902eae4c5943626030429ff20f3b63 # v0.4.14
-github.com/Microsoft/hcsshim b3f49c06ffaeef24d09c6c08ec8ec8425a0303e2 # v0.8.7
+github.com/Microsoft/hcsshim 0b571ac85d7c5842b26d2571de4868634a4c39d7 # v0.8.7-24-g0b571ac8
github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
github.com/opencontainers/image-spec d60099175f88c47cd379c4738d158884749ed235 # v1.0.1
-github.com/opencontainers/runc d736ef14f0288d6993a1845745d6756cfc9ddd5a # v1.0.0-rc9
+github.com/opencontainers/runc dc9208a3303feef5b3839f4323d9beb36df0a9dd # v1.0.0-rc10
github.com/opencontainers/runtime-spec 29686dbc5559d93fb1ef402eeda3e35c38d75af4 # v1.0.1-59-g29686db
github.com/pkg/errors ba968bfe8b2f7e042a574c888954fccecfa385b4 # v0.8.1
-github.com/prometheus/client_golang f4fb1b73fb099f396a7f0036bf86aa8def4ed823
-github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c
-github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563
-github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd
+github.com/prometheus/client_golang c42bebe5a5cddfc6b28cd639103369d8a75dfa89 # v1.3.0
+github.com/prometheus/client_model d1d2010b5beead3fa1c5f271a5cf626e40b3ad6e # v0.1.0
+github.com/prometheus/common 287d3e634a1e550c9e463dd7e5a75a422c614505 # v0.7.0
+github.com/prometheus/procfs 6d489fc7f1d9cd890a250f3ea3431b1744b9623f # v0.0.8
github.com/russross/blackfriday 05f3235734ad95d0016f6a23902f06461fcf567a # v1.5.2
github.com/sirupsen/logrus 8bdbc7bcc01dcbb8ec23dc8a28e332258d25251f # v1.4.1
github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2
@@ -47,12 +48,12 @@ golang.org/x/net f3200d17e092c607f615320ecaad
golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e
golang.org/x/sys c990c680b611ac1aeb7d8f2af94a825f98d69720 https://github.com/golang/sys
golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
-google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
-google.golang.org/grpc 39e8a7b072a67ca2a75f57fa2e0d50995f5b22f6 # v1.23.1
-gotest.tools 1083505acf35a0bd8a696b26837e1fb3187a7a83 # v2.3.0
+google.golang.org/genproto e50cd9704f63023d62cd06a1994b98227fc4d21a
+google.golang.org/grpc f495f5b15ae7ccda3b38c53a1bfcde4c1a58a2bc # v1.27.1
+gotest.tools/v3 bb0d8a963040ea5048dcef1a14d8f8b58a33d4b3 # v3.0.2
# cri dependencies
-github.com/containerd/cri c9d45e65263e26f7e7f0ac8fdca0d510622f12cb # master
+github.com/containerd/cri c0294ebfe0b4342db85c0faf7727ceb8d8c3afce # master
github.com/containerd/go-cni 0d360c50b10b350b6bb23863fd4dfb1c232b01c9
github.com/containernetworking/cni 4cfb7b568922a3c79a23e438dc52fe537fc9687e # v0.7.1
github.com/containernetworking/plugins 9f96827c7cabb03f21d86326000c00f61e181f6a # v0.7.6
@@ -62,25 +63,26 @@ github.com/docker/docker d1d5f6476656c6aad457e2a91d34
github.com/docker/spdystream 449fdfce4d962303d702fec724ef0ad181c92528
github.com/emicklei/go-restful b993709ae1a4f6dd19cfa475232614441b11c9d5 # v2.9.5
github.com/google/gofuzz f140a6486e521aad38f5917de355cbf147cc0496 # v1.0.0
-github.com/json-iterator/go 27518f6661eba504be5a7a9a9f6d9460d892ade3 # v1.1.7
+github.com/json-iterator/go 03217c3e97663914aec3faafde50d081f197a0a2 # v1.1.8
github.com/modern-go/concurrent bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94 # 1.0.3
github.com/modern-go/reflect2 4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd # 1.0.1
-github.com/opencontainers/selinux 3a1f366feb7aecbf7a0e71ac4cea88b31597de9e # v1.2.2
+github.com/opencontainers/selinux 31f70552238c5e017d78c3f1ba65e85f593f48e0 # 1.3.3
github.com/seccomp/libseccomp-golang 689e3c1541a84461afc49c1c87352a6cedf72e9c # v0.9.1
+github.com/stretchr/testify 221dbe5ed46703ee255b1da0dec05086f5035f62 # v1.4.0
github.com/tchap/go-patricia 666120de432aea38ab06bd5c818f04f4129882c9 # v2.2.6
-golang.org/x/crypto 5c40567a22f818bd14a1ea7245dad9f8ef0691aa
+golang.org/x/crypto 1d94cc7ab1c630336ab82ccb9c9cda72a875c382
golang.org/x/oauth2 0f29369cfe4552d0e4bcddc57cc75f4d7e672a33
-golang.org/x/time 85acf8d2951cb2a3bde7632f9ff273ef0379bcbd
-gopkg.in/inf.v0 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4 # v0.9.0
-gopkg.in/yaml.v2 51d6538a90f86fe93ac480b35f37b2be17fef232 # v2.2.2
-k8s.io/api d2ab659560cb09bd6c9a3011b6468f0025c65e63 # kubernetes-1.16.0-rc.2
-k8s.io/apimachinery 27d36303b6556f377b4f34e64705fa9024a12b0c # kubernetes-1.16.0-rc.2
-k8s.io/apiserver 5669a5603d961de1ecb7a73b85199e4799081334 # kubernetes-1.16.0-rc.2
-k8s.io/client-go 5ff489491ea74e098486c3530bbfa0171dc7bf95 # kubernetes-1.16.0-rc.2
-k8s.io/cri-api 608eb1dad4ac015f9d4f36a3fc70ad3e579af892 # kubernetes-1.16.0-rc.2
-k8s.io/klog 3ca30a56d8a775276f9cdae009ba326fdc05af7f # v0.4.0
-k8s.io/kubernetes 4cb51f0d2d8392ea12aeae13182b41008b3a268e # v1.16.0-rc.2
-k8s.io/utils c2654d5206da6b7b6ace12841e8f359bb89b443c
+golang.org/x/time 9d24e82272b4f38b78bc8cff74fa936d31ccd8ef
+gopkg.in/inf.v0 d2d2541c53f18d2a059457998ce2876cc8e67cbf # v0.9.1
+gopkg.in/yaml.v2 53403b58ad1b561927d19068c655246f2db79d48 # v2.2.8
+k8s.io/api 7643814f1c97f24ccfb38c2b85a7bb3c7f494346 # kubernetes-1.17.1
+k8s.io/apimachinery 79c2a76c473a20cdc4ce59cae4b72529b5d9d16b # kubernetes-1.17.1
+k8s.io/apiserver 5381f05fcb881d39af12eeecab5645364229300c # kubernetes-1.17.1
+k8s.io/client-go 69012f50f4b0243bccdb82c24402a10224a91f51 # kubernetes-1.17.1
+k8s.io/cri-api 775aa3c1cf7380ba8b7362f5a52f1e6d2e130bb9 # kubernetes-1.17.1
+k8s.io/klog 2ca9ad30301bf30a8a6e0fa2110db6b8df699a91 # v1.0.0
+k8s.io/kubernetes d224476cd0730baca2b6e357d144171ed74192d6 # v1.17.1
+k8s.io/utils e782cd3c129fc98ee807f3c889c0f26eb7c9daf5
sigs.k8s.io/yaml fd68e9863619f6ec2fdd8625fe1f02e7c877e480 # v1.1.0
# zfs dependencies
diff --git a/vendor/github.com/containerd/continuity/fs/copy.go b/vendor/github.com/containerd/continuity/fs/copy.go
index ad61022ad..818bba2cd 100644
--- a/vendor/github.com/containerd/continuity/fs/copy.go
+++ b/vendor/github.com/containerd/continuity/fs/copy.go
@@ -80,7 +80,7 @@ func copyDirectory(dst, src string, inodes map[uint64]string, o *copyDirOpts) er
return errors.Wrapf(err, "failed to stat %s", src)
}
if !stat.IsDir() {
- return errors.Errorf("source is not directory")
+ return errors.Errorf("source %s is not directory", src)
}
if st, err := os.Stat(dst); err != nil {
@@ -104,6 +104,10 @@ func copyDirectory(dst, src string, inodes map[uint64]string, o *copyDirOpts) er
return errors.Wrapf(err, "failed to copy file info for %s", dst)
}
+ if err := copyXAttrs(dst, src, o.xeh); err != nil {
+ return errors.Wrap(err, "failed to copy xattrs")
+ }
+
for _, fi := range fis {
source := filepath.Join(src, fi.Name())
target := filepath.Join(dst, fi.Name())
diff --git a/vendor/github.com/containerd/continuity/fs/copy_linux.go b/vendor/github.com/containerd/continuity/fs/copy_linux.go
index 81c71522a..72bae7d4e 100644
--- a/vendor/github.com/containerd/continuity/fs/copy_linux.go
+++ b/vendor/github.com/containerd/continuity/fs/copy_linux.go
@@ -51,7 +51,10 @@ func copyFileInfo(fi os.FileInfo, name string) error {
}
}
- timespec := []unix.Timespec{unix.Timespec(StatAtime(st)), unix.Timespec(StatMtime(st))}
+ timespec := []unix.Timespec{
+ unix.NsecToTimespec(syscall.TimespecToNsec(StatAtime(st))),
+ unix.NsecToTimespec(syscall.TimespecToNsec(StatMtime(st))),
+ }
if err := unix.UtimesNanoAt(unix.AT_FDCWD, name, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil {
return errors.Wrapf(err, "failed to utime %s", name)
}
diff --git a/vendor/github.com/containerd/continuity/go.mod b/vendor/github.com/containerd/continuity/go.mod
new file mode 100644
index 000000000..86a7f148c
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/go.mod
@@ -0,0 +1,23 @@
+module github.com/containerd/continuity
+
+go 1.11
+
+require (
+ bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898
+ github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4
+ github.com/golang/protobuf v1.2.0
+ github.com/inconshreveable/mousetrap v1.0.0 // indirect
+ github.com/onsi/ginkgo v1.10.1 // indirect
+ github.com/onsi/gomega v1.7.0 // indirect
+ github.com/opencontainers/go-digest v1.0.0-rc1
+ github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7
+ github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2
+ github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee
+ github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95 // indirect
+ github.com/stretchr/testify v1.4.0 // indirect
+ golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3 // indirect
+ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f
+ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e
+ gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect
+ gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 // indirect
+)
diff --git a/vendor/github.com/containerd/continuity/vendor.conf b/vendor/github.com/containerd/continuity/vendor.conf
deleted file mode 100644
index 5bd88d5fd..000000000
--- a/vendor/github.com/containerd/continuity/vendor.conf
+++ /dev/null
@@ -1,13 +0,0 @@
-bazil.org/fuse 371fbbdaa8987b715bdd21d6adc4c9b20155f748
-github.com/dustin/go-humanize bb3d318650d48840a39aa21a027c6630e198e626
-github.com/golang/protobuf 1e59b77b52bf8e4b449a57e6f79f21226d571845
-github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
-github.com/opencontainers/go-digest 279bed98673dd5bef374d3b6e4b09e2af76183bf
-github.com/pkg/errors f15c970de5b76fac0b59abb32d62c17cc7bed265
-github.com/sirupsen/logrus 89742aefa4b206dcf400792f3bd35b542998eb3b
-github.com/spf13/cobra 2da4a54c5ceefcee7ca5dd0eea1e18a3b6366489
-github.com/spf13/pflag 4c012f6dcd9546820e378d0bdda4d8fc772cdfea
-golang.org/x/crypto 9f005a07e0d31d45e6656d241bb5c0f2efd4bc94
-golang.org/x/net a337091b0525af65de94df2eb7e98bd9962dcbe2
-golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
-golang.org/x/sys 77b0e4315053a57ed2962443614bdb28db152054
diff --git a/vendor/github.com/docker/go-metrics/LICENSE.code b/vendor/github.com/docker/go-metrics/LICENSE
similarity index 100%
rename from vendor/github.com/docker/go-metrics/LICENSE.code
rename to vendor/github.com/docker/go-metrics/LICENSE
diff --git a/vendor/github.com/docker/go-metrics/README.md b/vendor/github.com/docker/go-metrics/README.md
index da6b16c20..a9e947cb5 100644
--- a/vendor/github.com/docker/go-metrics/README.md
+++ b/vendor/github.com/docker/go-metrics/README.md
@@ -68,6 +68,18 @@ If you need to use a unit but it is not defined in the package please open a PR
Package documentation can be found [here](https://godoc.org/github.com/docker/go-metrics).
+## HTTP Metrics
+
+To instrument a http handler, you can wrap the code like this:
+
+```go
+namespace := metrics.NewNamespace("docker_distribution", "http", metrics.Labels{"handler": "your_http_handler_name"})
+httpMetrics := namespace.NewDefaultHttpMetrics()
+metrics.Register(namespace)
+instrumentedHandler = metrics.InstrumentHandler(httpMetrics, unInstrumentedHandler)
+```
+Note: The `handler` label must be provided when a new namespace is created.
+
## Additional Metrics
Additional metrics are also defined here that are not available in the prometheus client.
diff --git a/vendor/github.com/docker/go-metrics/go.mod b/vendor/github.com/docker/go-metrics/go.mod
new file mode 100644
index 000000000..7e328f0cf
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/go.mod
@@ -0,0 +1,5 @@
+module github.com/docker/go-metrics
+
+go 1.11
+
+require github.com/prometheus/client_golang v1.1.0
diff --git a/vendor/github.com/docker/go-metrics/handler.go b/vendor/github.com/docker/go-metrics/handler.go
index bb3be41d3..05601e9ec 100644
--- a/vendor/github.com/docker/go-metrics/handler.go
+++ b/vendor/github.com/docker/go-metrics/handler.go
@@ -4,10 +4,71 @@ import (
"net/http"
"github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+)
+
+// HTTPHandlerOpts describes a set of configurable options of http metrics
+type HTTPHandlerOpts struct {
+ DurationBuckets []float64
+ RequestSizeBuckets []float64
+ ResponseSizeBuckets []float64
+}
+
+const (
+ InstrumentHandlerResponseSize = iota
+ InstrumentHandlerRequestSize
+ InstrumentHandlerDuration
+ InstrumentHandlerCounter
+ InstrumentHandlerInFlight
+)
+
+type HTTPMetric struct {
+ prometheus.Collector
+ handlerType int
+}
+
+var (
+ defaultDurationBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10, 25, 60}
+ defaultRequestSizeBuckets = prometheus.ExponentialBuckets(1024, 2, 22) //1K to 4G
+ defaultResponseSizeBuckets = defaultRequestSizeBuckets
)
// Handler returns the global http.Handler that provides the prometheus
-// metrics format on GET requests
+// metrics format on GET requests. This handler is no longer instrumented.
func Handler() http.Handler {
- return prometheus.Handler()
+ return promhttp.Handler()
+}
+
+func InstrumentHandler(metrics []*HTTPMetric, handler http.Handler) http.HandlerFunc {
+ return InstrumentHandlerFunc(metrics, handler.ServeHTTP)
+}
+
+func InstrumentHandlerFunc(metrics []*HTTPMetric, handlerFunc http.HandlerFunc) http.HandlerFunc {
+ var handler http.Handler
+ handler = http.HandlerFunc(handlerFunc)
+ for _, metric := range metrics {
+ switch metric.handlerType {
+ case InstrumentHandlerResponseSize:
+ if collector, ok := metric.Collector.(prometheus.ObserverVec); ok {
+ handler = promhttp.InstrumentHandlerResponseSize(collector, handler)
+ }
+ case InstrumentHandlerRequestSize:
+ if collector, ok := metric.Collector.(prometheus.ObserverVec); ok {
+ handler = promhttp.InstrumentHandlerRequestSize(collector, handler)
+ }
+ case InstrumentHandlerDuration:
+ if collector, ok := metric.Collector.(prometheus.ObserverVec); ok {
+ handler = promhttp.InstrumentHandlerDuration(collector, handler)
+ }
+ case InstrumentHandlerCounter:
+ if collector, ok := metric.Collector.(*prometheus.CounterVec); ok {
+ handler = promhttp.InstrumentHandlerCounter(collector, handler)
+ }
+ case InstrumentHandlerInFlight:
+ if collector, ok := metric.Collector.(prometheus.Gauge); ok {
+ handler = promhttp.InstrumentHandlerInFlight(collector, handler)
+ }
+ }
+ }
+ return handler.ServeHTTP
}
diff --git a/vendor/github.com/docker/go-metrics/namespace.go b/vendor/github.com/docker/go-metrics/namespace.go
index 7734c2945..798315451 100644
--- a/vendor/github.com/docker/go-metrics/namespace.go
+++ b/vendor/github.com/docker/go-metrics/namespace.go
@@ -179,3 +179,137 @@ func makeName(name string, unit Unit) string {
return fmt.Sprintf("%s_%s", name, unit)
}
+
+func (n *Namespace) NewDefaultHttpMetrics(handlerName string) []*HTTPMetric {
+ return n.NewHttpMetricsWithOpts(handlerName, HTTPHandlerOpts{
+ DurationBuckets: defaultDurationBuckets,
+ RequestSizeBuckets: defaultResponseSizeBuckets,
+ ResponseSizeBuckets: defaultResponseSizeBuckets,
+ })
+}
+
+func (n *Namespace) NewHttpMetrics(handlerName string, durationBuckets, requestSizeBuckets, responseSizeBuckets []float64) []*HTTPMetric {
+ return n.NewHttpMetricsWithOpts(handlerName, HTTPHandlerOpts{
+ DurationBuckets: durationBuckets,
+ RequestSizeBuckets: requestSizeBuckets,
+ ResponseSizeBuckets: responseSizeBuckets,
+ })
+}
+
+func (n *Namespace) NewHttpMetricsWithOpts(handlerName string, opts HTTPHandlerOpts) []*HTTPMetric {
+ var httpMetrics []*HTTPMetric
+ inFlightMetric := n.NewInFlightGaugeMetric(handlerName)
+ requestTotalMetric := n.NewRequestTotalMetric(handlerName)
+ requestDurationMetric := n.NewRequestDurationMetric(handlerName, opts.DurationBuckets)
+ requestSizeMetric := n.NewRequestSizeMetric(handlerName, opts.RequestSizeBuckets)
+ responseSizeMetric := n.NewResponseSizeMetric(handlerName, opts.ResponseSizeBuckets)
+ httpMetrics = append(httpMetrics, inFlightMetric, requestDurationMetric, requestTotalMetric, requestSizeMetric, responseSizeMetric)
+ return httpMetrics
+}
+
+func (n *Namespace) NewInFlightGaugeMetric(handlerName string) *HTTPMetric {
+ labels := prometheus.Labels(n.labels)
+ labels["handler"] = handlerName
+ metric := prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: n.name,
+ Subsystem: n.subsystem,
+ Name: "in_flight_requests",
+ Help: "The in-flight HTTP requests",
+ ConstLabels: prometheus.Labels(labels),
+ })
+ httpMetric := &HTTPMetric{
+ Collector: metric,
+ handlerType: InstrumentHandlerInFlight,
+ }
+ n.Add(httpMetric)
+ return httpMetric
+}
+
+func (n *Namespace) NewRequestTotalMetric(handlerName string) *HTTPMetric {
+ labels := prometheus.Labels(n.labels)
+ labels["handler"] = handlerName
+ metric := prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: n.name,
+ Subsystem: n.subsystem,
+ Name: "requests_total",
+ Help: "Total number of HTTP requests made.",
+ ConstLabels: prometheus.Labels(labels),
+ },
+ []string{"code", "method"},
+ )
+ httpMetric := &HTTPMetric{
+ Collector: metric,
+ handlerType: InstrumentHandlerCounter,
+ }
+ n.Add(httpMetric)
+ return httpMetric
+}
+func (n *Namespace) NewRequestDurationMetric(handlerName string, buckets []float64) *HTTPMetric {
+ if len(buckets) == 0 {
+ panic("DurationBuckets must be provided")
+ }
+ labels := prometheus.Labels(n.labels)
+ labels["handler"] = handlerName
+ opts := prometheus.HistogramOpts{
+ Namespace: n.name,
+ Subsystem: n.subsystem,
+ Name: "request_duration_seconds",
+ Help: "The HTTP request latencies in seconds.",
+ Buckets: buckets,
+ ConstLabels: prometheus.Labels(labels),
+ }
+ metric := prometheus.NewHistogramVec(opts, []string{"method"})
+ httpMetric := &HTTPMetric{
+ Collector: metric,
+ handlerType: InstrumentHandlerDuration,
+ }
+ n.Add(httpMetric)
+ return httpMetric
+}
+
+func (n *Namespace) NewRequestSizeMetric(handlerName string, buckets []float64) *HTTPMetric {
+ if len(buckets) == 0 {
+ panic("RequestSizeBuckets must be provided")
+ }
+ labels := prometheus.Labels(n.labels)
+ labels["handler"] = handlerName
+ opts := prometheus.HistogramOpts{
+ Namespace: n.name,
+ Subsystem: n.subsystem,
+ Name: "request_size_bytes",
+ Help: "The HTTP request sizes in bytes.",
+ Buckets: buckets,
+ ConstLabels: prometheus.Labels(labels),
+ }
+ metric := prometheus.NewHistogramVec(opts, []string{})
+ httpMetric := &HTTPMetric{
+ Collector: metric,
+ handlerType: InstrumentHandlerRequestSize,
+ }
+ n.Add(httpMetric)
+ return httpMetric
+}
+
+func (n *Namespace) NewResponseSizeMetric(handlerName string, buckets []float64) *HTTPMetric {
+ if len(buckets) == 0 {
+ panic("ResponseSizeBuckets must be provided")
+ }
+ labels := prometheus.Labels(n.labels)
+ labels["handler"] = handlerName
+ opts := prometheus.HistogramOpts{
+ Namespace: n.name,
+ Subsystem: n.subsystem,
+ Name: "response_size_bytes",
+ Help: "The HTTP response sizes in bytes.",
+ Buckets: buckets,
+ ConstLabels: prometheus.Labels(labels),
+ }
+ metrics := prometheus.NewHistogramVec(opts, []string{})
+ httpMetric := &HTTPMetric{
+ Collector: metrics,
+ handlerType: InstrumentHandlerResponseSize,
+ }
+ n.Add(httpMetric)
+ return httpMetric
+}
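
Not part of the vendored patch: a hedged sketch of how the HTTP metric constructors added above are meant to be wired to a handler, based on the signatures visible in the vendored handler.go and namespace.go. The namespace name, handler name, and address are placeholders, and the call signatures are assumptions taken from this vendored copy rather than documented API.

package main

import (
	"log"
	"net/http"

	metrics "github.com/docker/go-metrics"
)

func main() {
	// One namespace per component; labels may be empty.
	ns := metrics.NewNamespace("myapp", "http", metrics.Labels{})

	// The gauge, counter and three histograms registered by the new helpers above.
	httpMetrics := ns.NewDefaultHttpMetrics("api")
	metrics.Register(ns)

	api := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	mux := http.NewServeMux()
	// InstrumentHandler chains the promhttp middlewares selected by each
	// metric's handlerType, as in the switch shown in handler.go above.
	mux.Handle("/api", metrics.InstrumentHandler(httpMetrics, api))
	mux.Handle("/metrics", metrics.Handler())
	log.Fatal(http.ListenAndServe(":8080", mux))
}
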
diff --git a/vendor/github.com/gogo/googleapis/go.mod b/vendor/github.com/gogo/googleapis/go.mod
index 4cdef556e..89be4cf87 100644
--- a/vendor/github.com/gogo/googleapis/go.mod
+++ b/vendor/github.com/gogo/googleapis/go.mod
@@ -2,4 +2,4 @@ module github.com/gogo/googleapis
go 1.12
-require github.com/gogo/protobuf v1.2.1
+require github.com/gogo/protobuf v1.3.1
diff --git a/vendor/github.com/gogo/googleapis/google/rpc/code.pb.go b/vendor/github.com/gogo/googleapis/google/rpc/code.pb.go
index e2c94aece..1f4d4d432 100644
--- a/vendor/github.com/gogo/googleapis/google/rpc/code.pb.go
+++ b/vendor/github.com/gogo/googleapis/google/rpc/code.pb.go
@@ -19,7 +19,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// The canonical error codes for Google APIs.
//
diff --git a/vendor/github.com/gogo/googleapis/google/rpc/error_details.pb.go b/vendor/github.com/gogo/googleapis/google/rpc/error_details.pb.go
index 657f1dac1..07d251d4b 100644
--- a/vendor/github.com/gogo/googleapis/google/rpc/error_details.pb.go
+++ b/vendor/github.com/gogo/googleapis/google/rpc/error_details.pb.go
@@ -10,6 +10,7 @@ import (
types "github.com/gogo/protobuf/types"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
)
@@ -23,7 +24,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// Describes when the clients can retry a failed request. Clients could ignore
// the recommendation here or retry when this information is missing from error
@@ -59,7 +60,7 @@ func (m *RetryInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_RetryInfo.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -113,7 +114,7 @@ func (m *DebugInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_DebugInfo.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -182,7 +183,7 @@ func (m *QuotaFailure) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_QuotaFailure.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -245,7 +246,7 @@ func (m *QuotaFailure_Violation) XXX_Marshal(b []byte, deterministic bool) ([]by
return xxx_messageInfo_QuotaFailure_Violation.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -308,7 +309,7 @@ func (m *PreconditionFailure) XXX_Marshal(b []byte, deterministic bool) ([]byte,
return xxx_messageInfo_PreconditionFailure.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -371,7 +372,7 @@ func (m *PreconditionFailure_Violation) XXX_Marshal(b []byte, deterministic bool
return xxx_messageInfo_PreconditionFailure_Violation.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -438,7 +439,7 @@ func (m *BadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_BadRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -494,7 +495,7 @@ func (m *BadRequest_FieldViolation) XXX_Marshal(b []byte, deterministic bool) ([
return xxx_messageInfo_BadRequest_FieldViolation.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -558,7 +559,7 @@ func (m *RequestInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_RequestInfo.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -632,7 +633,7 @@ func (m *ResourceInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_ResourceInfo.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -709,7 +710,7 @@ func (m *Help) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Help.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -763,7 +764,7 @@ func (m *Help_Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Help_Link.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -827,7 +828,7 @@ func (m *LocalizedMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, er
return xxx_messageInfo_LocalizedMessage.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -2093,7 +2094,7 @@ func valueToGoStringErrorDetails(v interface{}, typ string) string {
func (m *RetryInfo) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2101,30 +2102,38 @@ func (m *RetryInfo) Marshal() (dAtA []byte, err error) {
}
func (m *RetryInfo) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RetryInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if m.RetryDelay != nil {
- dAtA[i] = 0xa
- i++
- i = encodeVarintErrorDetails(dAtA, i, uint64(m.RetryDelay.Size()))
- n1, err := m.RetryDelay.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ {
+ size, err := m.RetryDelay.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintErrorDetails(dAtA, i, uint64(size))
}
- i += n1
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *DebugInfo) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2132,41 +2141,42 @@ func (m *DebugInfo) Marshal() (dAtA []byte, err error) {
}
func (m *DebugInfo) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DebugInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.StackEntries) > 0 {
- for _, s := range m.StackEntries {
- dAtA[i] = 0xa
- i++
- l = len(s)
- for l >= 1<<7 {
- dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
- l >>= 7
- i++
- }
- dAtA[i] = uint8(l)
- i++
- i += copy(dAtA[i:], s)
- }
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Detail) > 0 {
- dAtA[i] = 0x12
- i++
+ i -= len(m.Detail)
+ copy(dAtA[i:], m.Detail)
i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Detail)))
- i += copy(dAtA[i:], m.Detail)
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.StackEntries) > 0 {
+ for iNdEx := len(m.StackEntries) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.StackEntries[iNdEx])
+ copy(dAtA[i:], m.StackEntries[iNdEx])
+ i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.StackEntries[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *QuotaFailure) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2174,32 +2184,40 @@ func (m *QuotaFailure) Marshal() (dAtA []byte, err error) {
}
func (m *QuotaFailure) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QuotaFailure) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Violations) > 0 {
- for _, msg := range m.Violations {
- dAtA[i] = 0xa
- i++
- i = encodeVarintErrorDetails(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ for iNdEx := len(m.Violations) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Violations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintErrorDetails(dAtA, i, uint64(size))
}
- i += n
+ i--
+ dAtA[i] = 0xa
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *QuotaFailure_Violation) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2207,32 +2225,40 @@ func (m *QuotaFailure_Violation) Marshal() (dAtA []byte, err error) {
}
func (m *QuotaFailure_Violation) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QuotaFailure_Violation) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Subject) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Subject)))
- i += copy(dAtA[i:], m.Subject)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Description) > 0 {
- dAtA[i] = 0x12
- i++
+ i -= len(m.Description)
+ copy(dAtA[i:], m.Description)
i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Description)))
- i += copy(dAtA[i:], m.Description)
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Subject) > 0 {
+ i -= len(m.Subject)
+ copy(dAtA[i:], m.Subject)
+ i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Subject)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *PreconditionFailure) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2240,32 +2266,40 @@ func (m *PreconditionFailure) Marshal() (dAtA []byte, err error) {
}
func (m *PreconditionFailure) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PreconditionFailure) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Violations) > 0 {
- for _, msg := range m.Violations {
- dAtA[i] = 0xa
- i++
- i = encodeVarintErrorDetails(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ for iNdEx := len(m.Violations) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Violations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintErrorDetails(dAtA, i, uint64(size))
}
- i += n
+ i--
+ dAtA[i] = 0xa
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *PreconditionFailure_Violation) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2273,38 +2307,47 @@ func (m *PreconditionFailure_Violation) Marshal() (dAtA []byte, err error) {
}
func (m *PreconditionFailure_Violation) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PreconditionFailure_Violation) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Type) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Type)))
- i += copy(dAtA[i:], m.Type)
- }
- if len(m.Subject) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Subject)))
- i += copy(dAtA[i:], m.Subject)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Description) > 0 {
- dAtA[i] = 0x1a
- i++
+ i -= len(m.Description)
+ copy(dAtA[i:], m.Description)
i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Description)))
- i += copy(dAtA[i:], m.Description)
+ i--
+ dAtA[i] = 0x1a
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Subject) > 0 {
+ i -= len(m.Subject)
+ copy(dAtA[i:], m.Subject)
+ i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Subject)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Type) > 0 {
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *BadRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2312,32 +2355,40 @@ func (m *BadRequest) Marshal() (dAtA []byte, err error) {
}
func (m *BadRequest) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BadRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.FieldViolations) > 0 {
- for _, msg := range m.FieldViolations {
- dAtA[i] = 0xa
- i++
- i = encodeVarintErrorDetails(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ for iNdEx := len(m.FieldViolations) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.FieldViolations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintErrorDetails(dAtA, i, uint64(size))
}
- i += n
+ i--
+ dAtA[i] = 0xa
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *BadRequest_FieldViolation) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2345,32 +2396,40 @@ func (m *BadRequest_FieldViolation) Marshal() (dAtA []byte, err error) {
}
func (m *BadRequest_FieldViolation) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BadRequest_FieldViolation) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Field) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Field)))
- i += copy(dAtA[i:], m.Field)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Description) > 0 {
- dAtA[i] = 0x12
- i++
+ i -= len(m.Description)
+ copy(dAtA[i:], m.Description)
i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Description)))
- i += copy(dAtA[i:], m.Description)
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Field) > 0 {
+ i -= len(m.Field)
+ copy(dAtA[i:], m.Field)
+ i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Field)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *RequestInfo) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2378,32 +2437,40 @@ func (m *RequestInfo) Marshal() (dAtA []byte, err error) {
}
func (m *RequestInfo) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RequestInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.RequestId) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.RequestId)))
- i += copy(dAtA[i:], m.RequestId)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.ServingData) > 0 {
- dAtA[i] = 0x12
- i++
+ i -= len(m.ServingData)
+ copy(dAtA[i:], m.ServingData)
i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.ServingData)))
- i += copy(dAtA[i:], m.ServingData)
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.RequestId) > 0 {
+ i -= len(m.RequestId)
+ copy(dAtA[i:], m.RequestId)
+ i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.RequestId)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ResourceInfo) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2411,44 +2478,54 @@ func (m *ResourceInfo) Marshal() (dAtA []byte, err error) {
}
func (m *ResourceInfo) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ResourceType) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.ResourceType)))
- i += copy(dAtA[i:], m.ResourceType)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- if len(m.ResourceName) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.ResourceName)))
- i += copy(dAtA[i:], m.ResourceName)
+ if len(m.Description) > 0 {
+ i -= len(m.Description)
+ copy(dAtA[i:], m.Description)
+ i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Description)))
+ i--
+ dAtA[i] = 0x22
}
if len(m.Owner) > 0 {
- dAtA[i] = 0x1a
- i++
+ i -= len(m.Owner)
+ copy(dAtA[i:], m.Owner)
i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Owner)))
- i += copy(dAtA[i:], m.Owner)
+ i--
+ dAtA[i] = 0x1a
}
- if len(m.Description) > 0 {
- dAtA[i] = 0x22
- i++
- i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Description)))
- i += copy(dAtA[i:], m.Description)
+ if len(m.ResourceName) > 0 {
+ i -= len(m.ResourceName)
+ copy(dAtA[i:], m.ResourceName)
+ i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.ResourceName)))
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.ResourceType) > 0 {
+ i -= len(m.ResourceType)
+ copy(dAtA[i:], m.ResourceType)
+ i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.ResourceType)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *Help) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2456,32 +2533,40 @@ func (m *Help) Marshal() (dAtA []byte, err error) {
}
func (m *Help) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Help) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Links) > 0 {
- for _, msg := range m.Links {
- dAtA[i] = 0xa
- i++
- i = encodeVarintErrorDetails(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ for iNdEx := len(m.Links) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Links[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintErrorDetails(dAtA, i, uint64(size))
}
- i += n
+ i--
+ dAtA[i] = 0xa
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *Help_Link) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2489,32 +2574,40 @@ func (m *Help_Link) Marshal() (dAtA []byte, err error) {
}
func (m *Help_Link) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Help_Link) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Description) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Description)))
- i += copy(dAtA[i:], m.Description)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Url) > 0 {
- dAtA[i] = 0x12
- i++
+ i -= len(m.Url)
+ copy(dAtA[i:], m.Url)
i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Url)))
- i += copy(dAtA[i:], m.Url)
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Description) > 0 {
+ i -= len(m.Description)
+ copy(dAtA[i:], m.Description)
+ i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Description)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *LocalizedMessage) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -2522,40 +2615,50 @@ func (m *LocalizedMessage) Marshal() (dAtA []byte, err error) {
}
func (m *LocalizedMessage) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LocalizedMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Locale) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Locale)))
- i += copy(dAtA[i:], m.Locale)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Message) > 0 {
- dAtA[i] = 0x12
- i++
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Message)))
- i += copy(dAtA[i:], m.Message)
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Locale) > 0 {
+ i -= len(m.Locale)
+ copy(dAtA[i:], m.Locale)
+ i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Locale)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func encodeVarintErrorDetails(dAtA []byte, offset int, v uint64) int {
+ offset -= sovErrorDetails(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func NewPopulatedRetryInfo(r randyErrorDetails, easy bool) *RetryInfo {
this := &RetryInfo{}
- if r.Intn(10) != 0 {
+ if r.Intn(5) != 0 {
this.RetryDelay = types.NewPopulatedDuration(r, easy)
}
if !easy && r.Intn(10) != 0 {
@@ -2580,7 +2683,7 @@ func NewPopulatedDebugInfo(r randyErrorDetails, easy bool) *DebugInfo {
func NewPopulatedQuotaFailure(r randyErrorDetails, easy bool) *QuotaFailure {
this := &QuotaFailure{}
- if r.Intn(10) != 0 {
+ if r.Intn(5) != 0 {
v2 := r.Intn(5)
this.Violations = make([]*QuotaFailure_Violation, v2)
for i := 0; i < v2; i++ {
@@ -2605,7 +2708,7 @@ func NewPopulatedQuotaFailure_Violation(r randyErrorDetails, easy bool) *QuotaFa
func NewPopulatedPreconditionFailure(r randyErrorDetails, easy bool) *PreconditionFailure {
this := &PreconditionFailure{}
- if r.Intn(10) != 0 {
+ if r.Intn(5) != 0 {
v3 := r.Intn(5)
this.Violations = make([]*PreconditionFailure_Violation, v3)
for i := 0; i < v3; i++ {
@@ -2631,7 +2734,7 @@ func NewPopulatedPreconditionFailure_Violation(r randyErrorDetails, easy bool) *
func NewPopulatedBadRequest(r randyErrorDetails, easy bool) *BadRequest {
this := &BadRequest{}
- if r.Intn(10) != 0 {
+ if r.Intn(5) != 0 {
v4 := r.Intn(5)
this.FieldViolations = make([]*BadRequest_FieldViolation, v4)
for i := 0; i < v4; i++ {
@@ -2678,7 +2781,7 @@ func NewPopulatedResourceInfo(r randyErrorDetails, easy bool) *ResourceInfo {
func NewPopulatedHelp(r randyErrorDetails, easy bool) *Help {
this := &Help{}
- if r.Intn(10) != 0 {
+ if r.Intn(5) != 0 {
v5 := r.Intn(5)
this.Links = make([]*Help_Link, v5)
for i := 0; i < v5; i++ {
@@ -3046,14 +3149,7 @@ func (m *LocalizedMessage) Size() (n int) {
}
func sovErrorDetails(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozErrorDetails(x uint64) (n int) {
return sovErrorDetails(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -3085,8 +3181,13 @@ func (this *QuotaFailure) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForViolations := "[]*QuotaFailure_Violation{"
+ for _, f := range this.Violations {
+ repeatedStringForViolations += strings.Replace(fmt.Sprintf("%v", f), "QuotaFailure_Violation", "QuotaFailure_Violation", 1) + ","
+ }
+ repeatedStringForViolations += "}"
s := strings.Join([]string{`&QuotaFailure{`,
- `Violations:` + strings.Replace(fmt.Sprintf("%v", this.Violations), "QuotaFailure_Violation", "QuotaFailure_Violation", 1) + `,`,
+ `Violations:` + repeatedStringForViolations + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -3108,8 +3209,13 @@ func (this *PreconditionFailure) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForViolations := "[]*PreconditionFailure_Violation{"
+ for _, f := range this.Violations {
+ repeatedStringForViolations += strings.Replace(fmt.Sprintf("%v", f), "PreconditionFailure_Violation", "PreconditionFailure_Violation", 1) + ","
+ }
+ repeatedStringForViolations += "}"
s := strings.Join([]string{`&PreconditionFailure{`,
- `Violations:` + strings.Replace(fmt.Sprintf("%v", this.Violations), "PreconditionFailure_Violation", "PreconditionFailure_Violation", 1) + `,`,
+ `Violations:` + repeatedStringForViolations + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -3132,8 +3238,13 @@ func (this *BadRequest) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForFieldViolations := "[]*BadRequest_FieldViolation{"
+ for _, f := range this.FieldViolations {
+ repeatedStringForFieldViolations += strings.Replace(fmt.Sprintf("%v", f), "BadRequest_FieldViolation", "BadRequest_FieldViolation", 1) + ","
+ }
+ repeatedStringForFieldViolations += "}"
s := strings.Join([]string{`&BadRequest{`,
- `FieldViolations:` + strings.Replace(fmt.Sprintf("%v", this.FieldViolations), "BadRequest_FieldViolation", "BadRequest_FieldViolation", 1) + `,`,
+ `FieldViolations:` + repeatedStringForFieldViolations + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -3181,8 +3292,13 @@ func (this *Help) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForLinks := "[]*Help_Link{"
+ for _, f := range this.Links {
+ repeatedStringForLinks += strings.Replace(fmt.Sprintf("%v", f), "Help_Link", "Help_Link", 1) + ","
+ }
+ repeatedStringForLinks += "}"
s := strings.Join([]string{`&Help{`,
- `Links:` + strings.Replace(fmt.Sprintf("%v", this.Links), "Help_Link", "Help_Link", 1) + `,`,
+ `Links:` + repeatedStringForLinks + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -4705,6 +4821,7 @@ func (m *LocalizedMessage) Unmarshal(dAtA []byte) error {
func skipErrorDetails(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -4736,10 +4853,8 @@ func skipErrorDetails(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -4760,55 +4875,30 @@ func skipErrorDetails(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthErrorDetails
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthErrorDetails
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowErrorDetails
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipErrorDetails(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthErrorDetails
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupErrorDetails
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthErrorDetails
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthErrorDetails = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowErrorDetails = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthErrorDetails = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowErrorDetails = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupErrorDetails = fmt.Errorf("proto: unexpected end of group")
)
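
Not part of the vendored files: a minimal illustration of the mechanical change repeated through these regenerated *.pb.go hunks. The v1.3.x output marshals back to front: start with i = len(buf), write each field's payload just below i, then its length varint, then the tag byte, and report len(buf) - i bytes used; encodeVarint likewise reserves space below the offset and returns the new, lower offset. The message and field here are hypothetical.

package main

import (
	"fmt"
	"math/bits"
)

type Note struct{ Text string }

func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// encodeVarint mirrors the regenerated encodeVarint*: reserve sov(v) bytes
// below offset, write the varint there, return the new (lower) offset.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

func (m *Note) Size() int {
	n := 0
	if l := len(m.Text); l > 0 {
		n += 1 + l + sov(uint64(l)) // tag + length varint + payload
	}
	return n
}

// MarshalToSizedBuffer fills the tail of dAtA, exactly like the regenerated code.
func (m *Note) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	if len(m.Text) > 0 {
		i -= len(m.Text)
		copy(dAtA[i:], m.Text)
		i = encodeVarint(dAtA, i, uint64(len(m.Text)))
		i--
		dAtA[i] = 0xa // field 1, wire type 2
	}
	return len(dAtA) - i, nil
}

func (m *Note) Marshal() ([]byte, error) {
	size := m.Size()
	dAtA := make([]byte, size)
	if _, err := m.MarshalToSizedBuffer(dAtA[:size]); err != nil {
		return nil, err
	}
	return dAtA, nil
}

func main() {
	b, _ := (&Note{Text: "hi"}).Marshal()
	fmt.Printf("%x\n", b) // 0a026869
}

The same shape repeats for every message in error_details.pb.go and status.pb.go below; only the field tags and types change.
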
diff --git a/vendor/github.com/gogo/googleapis/google/rpc/status.pb.go b/vendor/github.com/gogo/googleapis/google/rpc/status.pb.go
index 65fc30f4a..59793ba38 100644
--- a/vendor/github.com/gogo/googleapis/google/rpc/status.pb.go
+++ b/vendor/github.com/gogo/googleapis/google/rpc/status.pb.go
@@ -10,6 +10,7 @@ import (
types "github.com/gogo/protobuf/types"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
)
@@ -23,7 +24,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// The `Status` type defines a logical error model that is suitable for
// different programming environments, including REST APIs and RPC APIs. It is
@@ -108,7 +109,7 @@ func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Status.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -295,7 +296,7 @@ func valueToGoStringStatus(v interface{}, typ string) string {
func (m *Status) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -303,47 +304,58 @@ func (m *Status) Marshal() (dAtA []byte, err error) {
}
func (m *Status) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Status) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if m.Code != 0 {
- dAtA[i] = 0x8
- i++
- i = encodeVarintStatus(dAtA, i, uint64(m.Code))
- }
- if len(m.Message) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintStatus(dAtA, i, uint64(len(m.Message)))
- i += copy(dAtA[i:], m.Message)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Details) > 0 {
- for _, msg := range m.Details {
- dAtA[i] = 0x1a
- i++
- i = encodeVarintStatus(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ for iNdEx := len(m.Details) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Details[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintStatus(dAtA, i, uint64(size))
}
- i += n
+ i--
+ dAtA[i] = 0x1a
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Message) > 0 {
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintStatus(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Code != 0 {
+ i = encodeVarintStatus(dAtA, i, uint64(m.Code))
+ i--
+ dAtA[i] = 0x8
}
- return i, nil
+ return len(dAtA) - i, nil
}
func encodeVarintStatus(dAtA []byte, offset int, v uint64) int {
+ offset -= sovStatus(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func NewPopulatedStatus(r randyStatus, easy bool) *Status {
this := &Status{}
@@ -352,7 +364,7 @@ func NewPopulatedStatus(r randyStatus, easy bool) *Status {
this.Code *= -1
}
this.Message = string(randStringStatus(r))
- if r.Intn(10) != 0 {
+ if r.Intn(5) != 0 {
v1 := r.Intn(5)
this.Details = make([]*types.Any, v1)
for i := 0; i < v1; i++ {
@@ -463,14 +475,7 @@ func (m *Status) Size() (n int) {
}
func sovStatus(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozStatus(x uint64) (n int) {
return sovStatus(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -479,10 +484,15 @@ func (this *Status) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForDetails := "[]*Any{"
+ for _, f := range this.Details {
+ repeatedStringForDetails += strings.Replace(fmt.Sprintf("%v", f), "Any", "types.Any", 1) + ","
+ }
+ repeatedStringForDetails += "}"
s := strings.Join([]string{`&Status{`,
`Code:` + fmt.Sprintf("%v", this.Code) + `,`,
`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
- `Details:` + strings.Replace(fmt.Sprintf("%v", this.Details), "Any", "types.Any", 1) + `,`,
+ `Details:` + repeatedStringForDetails + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -638,6 +648,7 @@ func (m *Status) Unmarshal(dAtA []byte) error {
func skipStatus(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -669,10 +680,8 @@ func skipStatus(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -693,55 +702,30 @@ func skipStatus(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthStatus
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthStatus
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowStatus
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipStatus(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthStatus
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupStatus
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthStatus
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthStatus = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowStatus = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthStatus = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowStatus = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupStatus = fmt.Errorf("proto: unexpected end of group")
)
diff --git a/vendor/github.com/gogo/protobuf/Readme.md b/vendor/github.com/gogo/protobuf/Readme.md
index 06685a28a..7e2d538a9 100644
--- a/vendor/github.com/gogo/protobuf/Readme.md
+++ b/vendor/github.com/gogo/protobuf/Readme.md
@@ -17,6 +17,18 @@ This code generation is used to achieve:
Keeping track of how up to date gogoprotobuf is relative to golang/protobuf is done in this
issue
+## Release v1.3.0
+
+The project has updated to release v1.3.0. Check out the release notes here.
+
+With this new release comes a new internal library version. This means any newly generated *pb.go files generated with the v1.3.0 library will not be compatible with the old library version (v1.2.1). However, current *pb.go files (generated with v1.2.1) should still work with the new library.
+
+Please make sure you manage your dependencies correctly when upgrading your project. If you are still using v1.2.1 and you update your dependencies, one of which could include a new *pb.go (generated with v1.3.0), you could get a compile time error.
+
+Our upstream repo, golang/protobuf, also had to go through this process in order to update their library version.
+Here is a link explaining hermetic builds.
+
+
## Users
These projects use gogoprotobuf:
@@ -49,6 +61,8 @@ These projects use gogoprotobuf:
- go-spacemesh
- cortex - sample proto file
- Apache SkyWalking APM - Istio telemetry receiver based on Mixer bypass protocol
+ - Hyperledger Burrow - a permissioned DLT framework
+ - IOV Weave - a blockchain framework - sample proto files
Please let us know if you are using gogoprotobuf by posting on our GoogleGroup.
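
Not part of the patch: the release notes above come down to keeping the proto runtime and the regenerated *.pb.go files on the same side of the v1.3.0 line. For a consumer managing dependencies with Go modules (this repository itself pins through vendor.conf instead), that is a single require; the module path below is a placeholder.

module example.com/yourservice

go 1.12

// Keep the proto runtime in lockstep with the *.pb.go files vendored or
// regenerated alongside it, as the release note above recommends.
require github.com/gogo/protobuf v1.3.1
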
diff --git a/vendor/github.com/gogo/protobuf/go.mod b/vendor/github.com/gogo/protobuf/go.mod
index 6f7e29139..fa2c3a967 100644
--- a/vendor/github.com/gogo/protobuf/go.mod
+++ b/vendor/github.com/gogo/protobuf/go.mod
@@ -1,3 +1,6 @@
module github.com/gogo/protobuf
-require github.com/kisielk/errcheck v1.1.0 // indirect
+require (
+ github.com/kisielk/errcheck v1.2.0 // indirect
+ github.com/kisielk/gotool v1.0.0 // indirect
+)
diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go
index e352808b9..1e91766ae 100644
--- a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go
+++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go
@@ -19,7 +19,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
var E_GoprotoEnumPrefix = &proto.ExtensionDesc{
ExtendedType: (*descriptor.EnumOptions)(nil),
diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go
index 3abfed2cf..9581ccd30 100644
--- a/vendor/github.com/gogo/protobuf/proto/encode.go
+++ b/vendor/github.com/gogo/protobuf/proto/encode.go
@@ -189,6 +189,8 @@ type Marshaler interface {
// prefixed by a varint-encoded length.
func (p *Buffer) EncodeMessage(pb Message) error {
siz := Size(pb)
+ sizVar := SizeVarint(uint64(siz))
+ p.grow(siz + sizVar)
p.EncodeVarint(uint64(siz))
return p.Marshal(pb)
}
diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go
index 686bd2a09..341c6f57f 100644
--- a/vendor/github.com/gogo/protobuf/proto/extensions.go
+++ b/vendor/github.com/gogo/protobuf/proto/extensions.go
@@ -527,6 +527,7 @@ func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
// SetExtension sets the specified extension of pb to the specified value.
func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
if epb, ok := pb.(extensionsBytes); ok {
+ ClearExtension(pb, extension)
newb, err := encodeExtension(extension, value)
if err != nil {
return err
diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go
index 53ebd8cca..6f1ae120e 100644
--- a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go
+++ b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go
@@ -154,6 +154,10 @@ func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error)
return EncodeExtensionMap(m.extensionsWrite(), data)
}
+func EncodeInternalExtensionBackwards(m extendableProto, data []byte) (n int, err error) {
+ return EncodeExtensionMapBackwards(m.extensionsWrite(), data)
+}
+
func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) {
o := 0
for _, e := range m {
@@ -169,6 +173,23 @@ func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) {
return o, nil
}
+func EncodeExtensionMapBackwards(m map[int32]Extension, data []byte) (n int, err error) {
+ o := 0
+ end := len(data)
+ for _, e := range m {
+ if err := e.Encode(); err != nil {
+ return 0, err
+ }
+ n := copy(data[end-len(e.enc):], e.enc)
+ if n != len(e.enc) {
+ return 0, io.ErrShortBuffer
+ }
+ end -= n
+ o += n
+ }
+ return o, nil
+}
+
func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) {
e := m[id]
if err := e.Encode(); err != nil {
diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go
index d17f80209..80db1c155 100644
--- a/vendor/github.com/gogo/protobuf/proto/lib.go
+++ b/vendor/github.com/gogo/protobuf/proto/lib.go
@@ -948,13 +948,19 @@ func isProto3Zero(v reflect.Value) bool {
return false
}
-// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
-// to assert that that code is compatible with this version of the proto package.
-const GoGoProtoPackageIsVersion2 = true
-
-// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
-// to assert that that code is compatible with this version of the proto package.
-const GoGoProtoPackageIsVersion1 = true
+const (
+ // ProtoPackageIsVersion3 is referenced from generated protocol buffer files
+ // to assert that that code is compatible with this version of the proto package.
+ GoGoProtoPackageIsVersion3 = true
+
+ // ProtoPackageIsVersion2 is referenced from generated protocol buffer files
+ // to assert that that code is compatible with this version of the proto package.
+ GoGoProtoPackageIsVersion2 = true
+
+ // ProtoPackageIsVersion1 is referenced from generated protocol buffer files
+ // to assert that that code is compatible with this version of the proto package.
+ GoGoProtoPackageIsVersion1 = true
+)
// InternalMessageInfo is a type used internally by generated .pb.go files.
// This type is not intended to be used by non-generated code.
diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go
index c9e5fa020..28da1475f 100644
--- a/vendor/github.com/gogo/protobuf/proto/properties.go
+++ b/vendor/github.com/gogo/protobuf/proto/properties.go
@@ -43,7 +43,6 @@ package proto
import (
"fmt"
"log"
- "os"
"reflect"
"sort"
"strconv"
@@ -205,7 +204,7 @@ func (p *Properties) Parse(s string) {
// "bytes,49,opt,name=foo,def=hello!"
fields := strings.Split(s, ",") // breaks def=, but handled below.
if len(fields) < 2 {
- fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
+ log.Printf("proto: tag has too few fields: %q", s)
return
}
@@ -225,7 +224,7 @@ func (p *Properties) Parse(s string) {
p.WireType = WireBytes
// no numeric converter for non-numeric types
default:
- fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
+ log.Printf("proto: tag has unknown wire type: %q", s)
return
}
@@ -400,6 +399,15 @@ func GetProperties(t reflect.Type) *StructProperties {
return sprop
}
+type (
+ oneofFuncsIface interface {
+ XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+ }
+ oneofWrappersIface interface {
+ XXX_OneofWrappers() []interface{}
+ }
+)
+
// getPropertiesLocked requires that propertiesMu is held.
func getPropertiesLocked(t reflect.Type) *StructProperties {
if prop, ok := propertiesMap[t]; ok {
@@ -441,37 +449,40 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
// Re-order prop.order.
sort.Sort(prop)
- type oneofMessage interface {
- XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
- }
- if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); isOneofMessage && ok {
+ if isOneofMessage {
var oots []interface{}
- _, _, _, oots = om.XXX_OneofFuncs()
-
- // Interpret oneof metadata.
- prop.OneofTypes = make(map[string]*OneofProperties)
- for _, oot := range oots {
- oop := &OneofProperties{
- Type: reflect.ValueOf(oot).Type(), // *T
- Prop: new(Properties),
- }
- sft := oop.Type.Elem().Field(0)
- oop.Prop.Name = sft.Name
- oop.Prop.Parse(sft.Tag.Get("protobuf"))
- // There will be exactly one interface field that
- // this new value is assignable to.
- for i := 0; i < t.NumField(); i++ {
- f := t.Field(i)
- if f.Type.Kind() != reflect.Interface {
- continue
+ switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+ case oneofFuncsIface:
+ _, _, _, oots = m.XXX_OneofFuncs()
+ case oneofWrappersIface:
+ oots = m.XXX_OneofWrappers()
+ }
+ if len(oots) > 0 {
+ // Interpret oneof metadata.
+ prop.OneofTypes = make(map[string]*OneofProperties)
+ for _, oot := range oots {
+ oop := &OneofProperties{
+ Type: reflect.ValueOf(oot).Type(), // *T
+ Prop: new(Properties),
}
- if !oop.Type.AssignableTo(f.Type) {
- continue
+ sft := oop.Type.Elem().Field(0)
+ oop.Prop.Name = sft.Name
+ oop.Prop.Parse(sft.Tag.Get("protobuf"))
+ // There will be exactly one interface field that
+ // this new value is assignable to.
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Type.Kind() != reflect.Interface {
+ continue
+ }
+ if !oop.Type.AssignableTo(f.Type) {
+ continue
+ }
+ oop.Field = i
+ break
}
- oop.Field = i
- break
+ prop.OneofTypes[oop.Prop.OrigName] = oop
}
- prop.OneofTypes[oop.Prop.OrigName] = oop
}
}
diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go
index 9b1538d05..f8babdefa 100644
--- a/vendor/github.com/gogo/protobuf/proto/table_marshal.go
+++ b/vendor/github.com/gogo/protobuf/proto/table_marshal.go
@@ -389,8 +389,13 @@ func (u *marshalInfo) computeMarshalInfo() {
// get oneof implementers
var oneofImplementers []interface{}
// gogo: isOneofMessage is needed for embedded oneof messages, without a marshaler and unmarshaler
- if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok && isOneofMessage {
- _, _, _, oneofImplementers = m.XXX_OneofFuncs()
+ if isOneofMessage {
+ switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+ case oneofFuncsIface:
+ _, _, _, oneofImplementers = m.XXX_OneofFuncs()
+ case oneofWrappersIface:
+ oneofImplementers = m.XXX_OneofWrappers()
+ }
}
// normal fields
@@ -519,10 +524,6 @@ func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofI
}
}
-type oneofMessage interface {
- XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
-}
-
// wiretype returns the wire encoding of the type.
func wiretype(encoding string) uint64 {
switch encoding {
@@ -2968,7 +2969,9 @@ func (p *Buffer) Marshal(pb Message) error {
if m, ok := pb.(newMarshaler); ok {
siz := m.XXX_Size()
p.grow(siz) // make sure buf has enough capacity
- p.buf, err = m.XXX_Marshal(p.buf, p.deterministic)
+ pp := p.buf[len(p.buf) : len(p.buf) : len(p.buf)+siz]
+ pp, err = m.XXX_Marshal(pp, p.deterministic)
+ p.buf = append(p.buf, pp...)
return err
}
if m, ok := pb.(Marshaler); ok {
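
Not part of the patch: the Buffer.Marshal change above hands XXX_Marshal a full slice expression, p.buf[len(p.buf):len(p.buf):len(p.buf)+siz]. The reading below — a zero-length window whose capacity stops at the reserved region, so the callee's in-place appends cannot spill past it — is my interpretation, illustrated on plain slices rather than the proto buffer itself.

package main

import "fmt"

func main() {
	buf := make([]byte, 4, 32)
	copy(buf, "head")

	// Zero length, capacity capped at 8 bytes, sharing buf's backing array.
	scratch := buf[4:4:12]

	// Appends fill the reserved window in place; exceeding the cap would make
	// append allocate a fresh array instead of touching anything beyond it.
	scratch = append(scratch, "payload!"...)

	buf = append(buf, scratch...)                // fold the callee's output back into buf
	fmt.Println(string(buf), len(buf), cap(buf)) // headpayload! 12 32
}
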
diff --git a/vendor/github.com/gogo/protobuf/proto/table_merge.go b/vendor/github.com/gogo/protobuf/proto/table_merge.go
index f520106e0..60dcf70d1 100644
--- a/vendor/github.com/gogo/protobuf/proto/table_merge.go
+++ b/vendor/github.com/gogo/protobuf/proto/table_merge.go
@@ -530,6 +530,25 @@ func (mi *mergeInfo) computeMergeInfo() {
}
case reflect.Struct:
switch {
+ case isSlice && !isPointer: // E.g. []pb.T
+ mergeInfo := getMergeInfo(tf)
+ zero := reflect.Zero(tf)
+ mfi.merge = func(dst, src pointer) {
+ // TODO: Make this faster?
+ dstsp := dst.asPointerTo(f.Type)
+ dsts := dstsp.Elem()
+ srcs := src.asPointerTo(f.Type).Elem()
+ for i := 0; i < srcs.Len(); i++ {
+ dsts = reflect.Append(dsts, zero)
+ srcElement := srcs.Index(i).Addr()
+ dstElement := dsts.Index(dsts.Len() - 1).Addr()
+ mergeInfo.merge(valToPointer(dstElement), valToPointer(srcElement))
+ }
+ if dsts.IsNil() {
+ dsts = reflect.MakeSlice(f.Type, 0, 0)
+ }
+ dstsp.Elem().Set(dsts)
+ }
case !isPointer:
mergeInfo := getMergeInfo(tf)
mfi.merge = func(dst, src pointer) {
diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go
index bb2622f28..937229386 100644
--- a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go
+++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go
@@ -371,15 +371,18 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
}
// Find any types associated with oneof fields.
- // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it?
- fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs")
// gogo: len(oneofFields) > 0 is needed for embedded oneof messages, without a marshaler and unmarshaler
- if fn.IsValid() && len(oneofFields) > 0 {
- res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{}
- for i := res.Len() - 1; i >= 0; i-- {
- v := res.Index(i) // interface{}
- tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X
- typ := tptr.Elem() // Msg_X
+ if len(oneofFields) > 0 {
+ var oneofImplementers []interface{}
+ switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+ case oneofFuncsIface:
+ _, _, _, oneofImplementers = m.XXX_OneofFuncs()
+ case oneofWrappersIface:
+ oneofImplementers = m.XXX_OneofWrappers()
+ }
+ for _, v := range oneofImplementers {
+ tptr := reflect.TypeOf(v) // *Msg_X
+ typ := tptr.Elem() // Msg_X
f := typ.Field(0) // oneof implementers have one field
baseUnmarshal := fieldUnmarshaler(&f)
@@ -407,11 +410,12 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
u.setTag(fieldNum, of.field, unmarshal, 0, name)
}
}
+
}
}
// Get extension ranges, if any.
- fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+ fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
if fn.IsValid() {
if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() {
panic("a message with extensions, but no extensions field in " + t.Name())
diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go
index 0407ba85d..87416afe9 100644
--- a/vendor/github.com/gogo/protobuf/proto/text.go
+++ b/vendor/github.com/gogo/protobuf/proto/text.go
@@ -476,6 +476,8 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
return nil
}
+var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
+
// writeAny writes an arbitrary field.
func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
v = reflect.Indirect(v)
@@ -589,8 +591,8 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
// mutating this value.
v = v.Addr()
}
- if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
- text, err := etm.MarshalText()
+ if v.Type().Implements(textMarshalerType) {
+ text, err := v.Interface().(encoding.TextMarshaler).MarshalText()
if err != nil {
return err
}
diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/compiler/plugin.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/compiler/plugin.proto
index e85c852fc..4a88adf14 100644
--- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/compiler/plugin.proto
+++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/compiler/plugin.proto
@@ -45,6 +45,7 @@
// flag "--${NAME}_out" is passed to protoc.
syntax = "proto2";
+
package google.protobuf.compiler;
option java_package = "com.google.protobuf.compiler";
option java_outer_classname = "PluginProtos";
diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto
index 887f16ddc..4a08905a5 100644
--- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto
+++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto
@@ -40,6 +40,7 @@
syntax = "proto2";
package google.protobuf;
+
option go_package = "descriptor";
option java_package = "com.google.protobuf";
option java_outer_classname = "DescriptorProtos";
@@ -59,8 +60,8 @@ message FileDescriptorSet {
// Describes a complete .proto file.
message FileDescriptorProto {
- optional string name = 1; // file name, relative to root of source tree
- optional string package = 2; // e.g. "foo", "foo.bar", etc.
+ optional string name = 1; // file name, relative to root of source tree
+ optional string package = 2; // e.g. "foo", "foo.bar", etc.
// Names of files imported by this file.
repeated string dependency = 3;
@@ -100,8 +101,8 @@ message DescriptorProto {
repeated EnumDescriptorProto enum_type = 4;
message ExtensionRange {
- optional int32 start = 1;
- optional int32 end = 2;
+ optional int32 start = 1; // Inclusive.
+ optional int32 end = 2; // Exclusive.
optional ExtensionRangeOptions options = 3;
}
@@ -115,8 +116,8 @@ message DescriptorProto {
// fields or extension ranges in the same message. Reserved ranges may
// not overlap.
message ReservedRange {
- optional int32 start = 1; // Inclusive.
- optional int32 end = 2; // Exclusive.
+ optional int32 start = 1; // Inclusive.
+ optional int32 end = 2; // Exclusive.
}
repeated ReservedRange reserved_range = 9;
// Reserved field names, which may not be used by fields in the same message.
@@ -137,42 +138,42 @@ message FieldDescriptorProto {
enum Type {
// 0 is reserved for errors.
// Order is weird for historical reasons.
- TYPE_DOUBLE = 1;
- TYPE_FLOAT = 2;
+ TYPE_DOUBLE = 1;
+ TYPE_FLOAT = 2;
// Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if
// negative values are likely.
- TYPE_INT64 = 3;
- TYPE_UINT64 = 4;
+ TYPE_INT64 = 3;
+ TYPE_UINT64 = 4;
// Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if
// negative values are likely.
- TYPE_INT32 = 5;
- TYPE_FIXED64 = 6;
- TYPE_FIXED32 = 7;
- TYPE_BOOL = 8;
- TYPE_STRING = 9;
+ TYPE_INT32 = 5;
+ TYPE_FIXED64 = 6;
+ TYPE_FIXED32 = 7;
+ TYPE_BOOL = 8;
+ TYPE_STRING = 9;
// Tag-delimited aggregate.
// Group type is deprecated and not supported in proto3. However, Proto3
// implementations should still be able to parse the group wire format and
// treat group fields as unknown fields.
- TYPE_GROUP = 10;
- TYPE_MESSAGE = 11; // Length-delimited aggregate.
+ TYPE_GROUP = 10;
+ TYPE_MESSAGE = 11; // Length-delimited aggregate.
// New in version 2.
- TYPE_BYTES = 12;
- TYPE_UINT32 = 13;
- TYPE_ENUM = 14;
- TYPE_SFIXED32 = 15;
- TYPE_SFIXED64 = 16;
- TYPE_SINT32 = 17; // Uses ZigZag encoding.
- TYPE_SINT64 = 18; // Uses ZigZag encoding.
- };
+ TYPE_BYTES = 12;
+ TYPE_UINT32 = 13;
+ TYPE_ENUM = 14;
+ TYPE_SFIXED32 = 15;
+ TYPE_SFIXED64 = 16;
+ TYPE_SINT32 = 17; // Uses ZigZag encoding.
+ TYPE_SINT64 = 18; // Uses ZigZag encoding.
+ }
enum Label {
// 0 is reserved for errors
- LABEL_OPTIONAL = 1;
- LABEL_REQUIRED = 2;
- LABEL_REPEATED = 3;
- };
+ LABEL_OPTIONAL = 1;
+ LABEL_REQUIRED = 2;
+ LABEL_REPEATED = 3;
+ }
optional string name = 1;
optional int32 number = 3;
@@ -234,8 +235,8 @@ message EnumDescriptorProto {
// is inclusive such that it can appropriately represent the entire int32
// domain.
message EnumReservedRange {
- optional int32 start = 1; // Inclusive.
- optional int32 end = 2; // Inclusive.
+ optional int32 start = 1; // Inclusive.
+ optional int32 end = 2; // Inclusive.
}
// Range of reserved numeric values. Reserved numeric values may not be used
@@ -276,9 +277,9 @@ message MethodDescriptorProto {
optional MethodOptions options = 4;
// Identifies if client streams multiple client messages
- optional bool client_streaming = 5 [default=false];
+ optional bool client_streaming = 5 [default = false];
// Identifies if server streams multiple server messages
- optional bool server_streaming = 6 [default=false];
+ optional bool server_streaming = 6 [default = false];
}
@@ -314,7 +315,6 @@ message MethodDescriptorProto {
// If this turns out to be popular, a web service will be set up
// to automatically assign option numbers.
-
message FileOptions {
// Sets the Java package where classes generated from this .proto will be
@@ -337,7 +337,7 @@ message FileOptions {
// named by java_outer_classname. However, the outer class will still be
// generated to contain the file's getDescriptor() method as well as any
// top-level extensions defined in the file.
- optional bool java_multiple_files = 10 [default=false];
+ optional bool java_multiple_files = 10 [default = false];
// This option does nothing.
optional bool java_generate_equals_and_hash = 20 [deprecated=true];
@@ -348,17 +348,17 @@ message FileOptions {
// Message reflection will do the same.
// However, an extension field still accepts non-UTF-8 byte sequences.
// This option has no effect on when used with the lite runtime.
- optional bool java_string_check_utf8 = 27 [default=false];
+ optional bool java_string_check_utf8 = 27 [default = false];
// Generated classes can be optimized for speed or code size.
enum OptimizeMode {
- SPEED = 1; // Generate complete code for parsing, serialization,
- // etc.
- CODE_SIZE = 2; // Use ReflectionOps to implement these methods.
- LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime.
+ SPEED = 1; // Generate complete code for parsing, serialization,
+ // etc.
+ CODE_SIZE = 2; // Use ReflectionOps to implement these methods.
+ LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime.
}
- optional OptimizeMode optimize_for = 9 [default=SPEED];
+ optional OptimizeMode optimize_for = 9 [default = SPEED];
// Sets the Go package where structs generated from this .proto will be
// placed. If omitted, the Go package will be derived from the following:
@@ -369,6 +369,7 @@ message FileOptions {
+
// Should generic services be generated in each language? "Generic" services
// are not specific to any particular RPC system. They are generated by the
// main code generators in each language (without additional plugins).
@@ -379,20 +380,20 @@ message FileOptions {
// that generate code specific to your particular RPC system. Therefore,
// these default to false. Old code which depends on generic services should
// explicitly set them to true.
- optional bool cc_generic_services = 16 [default=false];
- optional bool java_generic_services = 17 [default=false];
- optional bool py_generic_services = 18 [default=false];
- optional bool php_generic_services = 42 [default=false];
+ optional bool cc_generic_services = 16 [default = false];
+ optional bool java_generic_services = 17 [default = false];
+ optional bool py_generic_services = 18 [default = false];
+ optional bool php_generic_services = 42 [default = false];
// Is this file deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for everything in the file, or it will be completely ignored; in the very
// least, this is a formalization for deprecating files.
- optional bool deprecated = 23 [default=false];
+ optional bool deprecated = 23 [default = false];
// Enables the use of arenas for the proto messages in this file. This applies
// only to generated classes for C++.
- optional bool cc_enable_arenas = 31 [default=false];
+ optional bool cc_enable_arenas = 31 [default = false];
// Sets the objective c class prefix which is prepended to all objective c
@@ -417,10 +418,9 @@ message FileOptions {
// determining the namespace.
optional string php_namespace = 41;
-
// Use this option to change the namespace of php generated metadata classes.
- // Default is empty. When this option is empty, the proto file name will be used
- // for determining the namespace.
+ // Default is empty. When this option is empty, the proto file name will be
+ // used for determining the namespace.
optional string php_metadata_namespace = 44;
// Use this option to change the package of ruby generated classes. Default
@@ -428,6 +428,7 @@ message FileOptions {
// determining the ruby package.
optional string ruby_package = 45;
+
// The parser stores options it doesn't recognize here.
// See the documentation for the "Options" section above.
repeated UninterpretedOption uninterpreted_option = 999;
@@ -458,18 +459,18 @@ message MessageOptions {
//
// Because this is an option, the above two restrictions are not enforced by
// the protocol compiler.
- optional bool message_set_wire_format = 1 [default=false];
+ optional bool message_set_wire_format = 1 [default = false];
// Disables the generation of the standard "descriptor()" accessor, which can
// conflict with a field of the same name. This is meant to make migration
// from proto1 easier; new code should avoid fields named "descriptor".
- optional bool no_standard_descriptor_accessor = 2 [default=false];
+ optional bool no_standard_descriptor_accessor = 2 [default = false];
// Is this message deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for the message, or it will be completely ignored; in the very least,
// this is a formalization for deprecating messages.
- optional bool deprecated = 3 [default=false];
+ optional bool deprecated = 3 [default = false];
// Whether the message is an automatically generated map entry type for the
// maps field.
@@ -486,7 +487,7 @@ message MessageOptions {
//
// Implementations may choose not to generate the map_entry=true message, but
// use a native map in the target language to hold the keys and values.
- // The reflection APIs in such implementions still need to work as
+ // The reflection APIs in such implementations still need to work as
// if the field is a repeated message field.
//
// NOTE: Do not set the option in .proto files. Always use the maps syntax
@@ -497,6 +498,7 @@ message MessageOptions {
//reserved 8; // javalite_serializable
//reserved 9; // javanano_as_lite
+
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
@@ -576,16 +578,16 @@ message FieldOptions {
// implementation must either *always* check its required fields, or *never*
// check its required fields, regardless of whether or not the message has
// been parsed.
- optional bool lazy = 5 [default=false];
+ optional bool lazy = 5 [default = false];
// Is this field deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for accessors, or it will be completely ignored; in the very least, this
// is a formalization for deprecating fields.
- optional bool deprecated = 3 [default=false];
+ optional bool deprecated = 3 [default = false];
// For Google-internal migration only. Do not use.
- optional bool weak = 10 [default=false];
+ optional bool weak = 10 [default = false];
// The parser stores options it doesn't recognize here. See above.
@@ -615,7 +617,7 @@ message EnumOptions {
// Depending on the target platform, this can emit Deprecated annotations
// for the enum, or it will be completely ignored; in the very least, this
// is a formalization for deprecating enums.
- optional bool deprecated = 3 [default=false];
+ optional bool deprecated = 3 [default = false];
//reserved 5; // javanano_as_lite
@@ -631,7 +633,7 @@ message EnumValueOptions {
// Depending on the target platform, this can emit Deprecated annotations
// for the enum value, or it will be completely ignored; in the very least,
// this is a formalization for deprecating enum values.
- optional bool deprecated = 1 [default=false];
+ optional bool deprecated = 1 [default = false];
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
@@ -651,7 +653,7 @@ message ServiceOptions {
// Depending on the target platform, this can emit Deprecated annotations
// for the service, or it will be completely ignored; in the very least,
// this is a formalization for deprecating services.
- optional bool deprecated = 33 [default=false];
+ optional bool deprecated = 33 [default = false];
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
@@ -671,18 +673,18 @@ message MethodOptions {
// Depending on the target platform, this can emit Deprecated annotations
// for the method, or it will be completely ignored; in the very least,
// this is a formalization for deprecating methods.
- optional bool deprecated = 33 [default=false];
+ optional bool deprecated = 33 [default = false];
// Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
// or neither? HTTP based RPC implementation may choose GET verb for safe
// methods, and PUT verb for idempotent methods instead of the default POST.
enum IdempotencyLevel {
IDEMPOTENCY_UNKNOWN = 0;
- NO_SIDE_EFFECTS = 1; // implies idempotent
- IDEMPOTENT = 2; // idempotent, but may have side effects
+ NO_SIDE_EFFECTS = 1; // implies idempotent
+ IDEMPOTENT = 2; // idempotent, but may have side effects
}
- optional IdempotencyLevel idempotency_level =
- 34 [default=IDEMPOTENCY_UNKNOWN];
+ optional IdempotencyLevel idempotency_level = 34
+ [default = IDEMPOTENCY_UNKNOWN];
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
@@ -763,7 +765,7 @@ message SourceCodeInfo {
// beginning of the "extend" block and is shared by all extensions within
// the block.
// - Just because a location's span is a subset of some other location's span
- // does not mean that it is a descendent. For example, a "group" defines
+ // does not mean that it is a descendant. For example, a "group" defines
// both a type and a field in a single declaration. Thus, the locations
// corresponding to the type and field and their components will overlap.
// - Code which tries to interpret locations should probably be designed to
@@ -794,14 +796,14 @@ message SourceCodeInfo {
// [ 4, 3, 2, 7 ]
// this path refers to the whole field declaration (from the beginning
// of the label to the terminating semicolon).
- repeated int32 path = 1 [packed=true];
+ repeated int32 path = 1 [packed = true];
// Always has exactly three or four elements: start line, start column,
// end line (optional, otherwise assumed same as start line), end column.
// These are packed into a single field for efficiency. Note that line
// and column numbers are zero-based -- typically you will want to add
// 1 to each before displaying to a user.
- repeated int32 span = 2 [packed=true];
+ repeated int32 span = 2 [packed = true];
// If this SourceCodeInfo represents a complete declaration, these are any
// comments appearing before and after the declaration which appear to be
@@ -866,7 +868,7 @@ message GeneratedCodeInfo {
message Annotation {
// Identifies the element in the original source .proto file. This field
// is formatted the same as SourceCodeInfo.Location.path.
- repeated int32 path = 1 [packed=true];
+ repeated int32 path = 1 [packed = true];
// Identifies the filesystem path to the original source .proto.
optional string source_file = 2;
diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/duration.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/duration.proto
index 8bbaa8b62..b14bea5d0 100644
--- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/duration.proto
+++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/duration.proto
@@ -101,7 +101,6 @@ option objc_class_prefix = "GPB";
//
//
message Duration {
-
// Signed seconds of the span of time. Must be from -315,576,000,000
// to +315,576,000,000 inclusive. Note: these bounds are computed from:
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/struct.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/struct.proto
index 4f78641fa..9db077159 100644
--- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/struct.proto
+++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/struct.proto
@@ -40,7 +40,6 @@ option java_outer_classname = "StructProto";
option java_multiple_files = true;
option objc_class_prefix = "GPB";
-
// `Struct` represents a structured data value, consisting of fields
// which map to dynamically typed values. In some languages, `Struct`
// might be supported by a native representation. For example, in
diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/timestamp.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/timestamp.proto
index 18cb7c3ea..0ebe36ea7 100644
--- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/timestamp.proto
+++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/timestamp.proto
@@ -113,17 +113,18 @@ option objc_class_prefix = "GPB";
// 01:30 UTC on January 15, 2017.
//
// In JavaScript, one can convert a Date object to this format using the
-// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
+// standard
+// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
// method. In Python, a standard `datetime.datetime` object can be converted
-// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
-// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
-// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
+// to this format using
+// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with
+// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use
+// the Joda Time's [`ISODateTimeFormat.dateTime()`](
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D
// ) to obtain a formatter capable of generating timestamps in this format.
//
//
message Timestamp {
-
// Represents seconds of UTC time since Unix epoch
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
// 9999-12-31T23:59:59Z inclusive.
diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/type.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/type.proto
index fcd15bfd7..cc626250d 100644
--- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/type.proto
+++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/type.proto
@@ -64,44 +64,44 @@ message Field {
// Basic field types.
enum Kind {
// Field type unknown.
- TYPE_UNKNOWN = 0;
+ TYPE_UNKNOWN = 0;
// Field type double.
- TYPE_DOUBLE = 1;
+ TYPE_DOUBLE = 1;
// Field type float.
- TYPE_FLOAT = 2;
+ TYPE_FLOAT = 2;
// Field type int64.
- TYPE_INT64 = 3;
+ TYPE_INT64 = 3;
// Field type uint64.
- TYPE_UINT64 = 4;
+ TYPE_UINT64 = 4;
// Field type int32.
- TYPE_INT32 = 5;
+ TYPE_INT32 = 5;
// Field type fixed64.
- TYPE_FIXED64 = 6;
+ TYPE_FIXED64 = 6;
// Field type fixed32.
- TYPE_FIXED32 = 7;
+ TYPE_FIXED32 = 7;
// Field type bool.
- TYPE_BOOL = 8;
+ TYPE_BOOL = 8;
// Field type string.
- TYPE_STRING = 9;
+ TYPE_STRING = 9;
// Field type group. Proto2 syntax only, and deprecated.
- TYPE_GROUP = 10;
+ TYPE_GROUP = 10;
// Field type message.
- TYPE_MESSAGE = 11;
+ TYPE_MESSAGE = 11;
// Field type bytes.
- TYPE_BYTES = 12;
+ TYPE_BYTES = 12;
// Field type uint32.
- TYPE_UINT32 = 13;
+ TYPE_UINT32 = 13;
// Field type enum.
- TYPE_ENUM = 14;
+ TYPE_ENUM = 14;
// Field type sfixed32.
- TYPE_SFIXED32 = 15;
+ TYPE_SFIXED32 = 15;
// Field type sfixed64.
- TYPE_SFIXED64 = 16;
+ TYPE_SFIXED64 = 16;
// Field type sint32.
- TYPE_SINT32 = 17;
+ TYPE_SINT32 = 17;
// Field type sint64.
- TYPE_SINT64 = 18;
- };
+ TYPE_SINT64 = 18;
+ }
// Whether a field is optional, required, or repeated.
enum Cardinality {
diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go
index cacfa3923..18b2a3318 100644
--- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go
+++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go
@@ -18,7 +18,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type FieldDescriptorProto_Type int32
@@ -1364,8 +1364,8 @@ type FileOptions struct {
// determining the namespace.
PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"`
// Use this option to change the namespace of php generated metadata classes.
- // Default is empty. When this option is empty, the proto file name will be used
- // for determining the namespace.
+ // Default is empty. When this option is empty, the proto file name will be
+ // used for determining the namespace.
PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"`
// Use this option to change the package of ruby generated classes. Default
// is empty. When this option is not set, the package name will be used for
@@ -1615,7 +1615,7 @@ type MessageOptions struct {
//
// Implementations may choose not to generate the map_entry=true message, but
// use a native map in the target language to hold the keys and values.
- // The reflection APIs in such implementions still need to work as
+ // The reflection APIs in such implementations still need to work as
// if the field is a repeated message field.
//
// NOTE: Do not set the option in .proto files. Always use the maps syntax
@@ -2363,7 +2363,7 @@ type SourceCodeInfo struct {
// beginning of the "extend" block and is shared by all extensions within
// the block.
// - Just because a location's span is a subset of some other location's span
- // does not mean that it is a descendent. For example, a "group" defines
+ // does not mean that it is a descendant. For example, a "group" defines
// both a type and a field in a single declaration. Thus, the locations
// corresponding to the type and field and their components will overlap.
// - Code which tries to interpret locations should probably be designed to
diff --git a/vendor/github.com/gogo/protobuf/types/any.pb.go b/vendor/github.com/gogo/protobuf/types/any.pb.go
index 202fe8e00..98e269d54 100644
--- a/vendor/github.com/gogo/protobuf/types/any.pb.go
+++ b/vendor/github.com/gogo/protobuf/types/any.pb.go
@@ -9,6 +9,7 @@ import (
proto "github.com/gogo/protobuf/proto"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
)
@@ -22,7 +23,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// `Any` contains an arbitrary serialized protocol buffer message along with a
// URL that describes the type of the serialized message.
@@ -155,7 +156,7 @@ func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Any.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -309,7 +310,7 @@ func valueToGoStringAny(v interface{}, typ string) string {
func (m *Any) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -317,36 +318,46 @@ func (m *Any) Marshal() (dAtA []byte, err error) {
}
func (m *Any) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Any) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.TypeUrl) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintAny(dAtA, i, uint64(len(m.TypeUrl)))
- i += copy(dAtA[i:], m.TypeUrl)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Value) > 0 {
- dAtA[i] = 0x12
- i++
+ i -= len(m.Value)
+ copy(dAtA[i:], m.Value)
i = encodeVarintAny(dAtA, i, uint64(len(m.Value)))
- i += copy(dAtA[i:], m.Value)
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.TypeUrl) > 0 {
+ i -= len(m.TypeUrl)
+ copy(dAtA[i:], m.TypeUrl)
+ i = encodeVarintAny(dAtA, i, uint64(len(m.TypeUrl)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func encodeVarintAny(dAtA []byte, offset int, v uint64) int {
+ offset -= sovAny(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func NewPopulatedAny(r randyAny, easy bool) *Any {
this := &Any{}
@@ -455,14 +466,7 @@ func (m *Any) Size() (n int) {
}
func sovAny(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozAny(x uint64) (n int) {
return sovAny(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -610,6 +614,7 @@ func (m *Any) Unmarshal(dAtA []byte) error {
func skipAny(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -641,10 +646,8 @@ func skipAny(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -665,55 +668,30 @@ func skipAny(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthAny
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthAny
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowAny
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipAny(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthAny
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupAny
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthAny
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthAny = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowAny = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthAny = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowAny = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupAny = fmt.Errorf("proto: unexpected end of group")
)
diff --git a/vendor/github.com/gogo/protobuf/types/api.pb.go b/vendor/github.com/gogo/protobuf/types/api.pb.go
index fe0eefd2d..58bf4b53b 100644
--- a/vendor/github.com/gogo/protobuf/types/api.pb.go
+++ b/vendor/github.com/gogo/protobuf/types/api.pb.go
@@ -9,6 +9,7 @@ import (
proto "github.com/gogo/protobuf/proto"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
)
@@ -22,7 +23,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// Api is a light-weight descriptor for an API Interface.
//
@@ -88,7 +89,7 @@ func (m *Api) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Api.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -194,7 +195,7 @@ func (m *Method) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Method.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -368,7 +369,7 @@ func (m *Mixin) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Mixin.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -862,7 +863,7 @@ func valueToGoStringApi(v interface{}, typ string) string {
func (m *Api) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -870,83 +871,99 @@ func (m *Api) Marshal() (dAtA []byte, err error) {
}
func (m *Api) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Api) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Name) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintApi(dAtA, i, uint64(len(m.Name)))
- i += copy(dAtA[i:], m.Name)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- if len(m.Methods) > 0 {
- for _, msg := range m.Methods {
- dAtA[i] = 0x12
- i++
- i = encodeVarintApi(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ if m.Syntax != 0 {
+ i = encodeVarintApi(dAtA, i, uint64(m.Syntax))
+ i--
+ dAtA[i] = 0x38
+ }
+ if len(m.Mixins) > 0 {
+ for iNdEx := len(m.Mixins) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Mixins[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintApi(dAtA, i, uint64(size))
}
- i += n
+ i--
+ dAtA[i] = 0x32
}
}
- if len(m.Options) > 0 {
- for _, msg := range m.Options {
- dAtA[i] = 0x1a
- i++
- i = encodeVarintApi(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
+ if m.SourceContext != nil {
+ {
+ size, err := m.SourceContext.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
- i += n
+ i -= size
+ i = encodeVarintApi(dAtA, i, uint64(size))
}
+ i--
+ dAtA[i] = 0x2a
}
if len(m.Version) > 0 {
- dAtA[i] = 0x22
- i++
+ i -= len(m.Version)
+ copy(dAtA[i:], m.Version)
i = encodeVarintApi(dAtA, i, uint64(len(m.Version)))
- i += copy(dAtA[i:], m.Version)
+ i--
+ dAtA[i] = 0x22
}
- if m.SourceContext != nil {
- dAtA[i] = 0x2a
- i++
- i = encodeVarintApi(dAtA, i, uint64(m.SourceContext.Size()))
- n1, err := m.SourceContext.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ if len(m.Options) > 0 {
+ for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintApi(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
}
- i += n1
}
- if len(m.Mixins) > 0 {
- for _, msg := range m.Mixins {
- dAtA[i] = 0x32
- i++
- i = encodeVarintApi(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ if len(m.Methods) > 0 {
+ for iNdEx := len(m.Methods) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Methods[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintApi(dAtA, i, uint64(size))
}
- i += n
+ i--
+ dAtA[i] = 0x12
}
}
- if m.Syntax != 0 {
- dAtA[i] = 0x38
- i++
- i = encodeVarintApi(dAtA, i, uint64(m.Syntax))
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintApi(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *Method) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -954,75 +971,86 @@ func (m *Method) Marshal() (dAtA []byte, err error) {
}
func (m *Method) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Method) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Name) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintApi(dAtA, i, uint64(len(m.Name)))
- i += copy(dAtA[i:], m.Name)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- if len(m.RequestTypeUrl) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintApi(dAtA, i, uint64(len(m.RequestTypeUrl)))
- i += copy(dAtA[i:], m.RequestTypeUrl)
+ if m.Syntax != 0 {
+ i = encodeVarintApi(dAtA, i, uint64(m.Syntax))
+ i--
+ dAtA[i] = 0x38
}
- if m.RequestStreaming {
- dAtA[i] = 0x18
- i++
- if m.RequestStreaming {
+ if len(m.Options) > 0 {
+ for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintApi(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ }
+ if m.ResponseStreaming {
+ i--
+ if m.ResponseStreaming {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
+ i--
+ dAtA[i] = 0x28
}
if len(m.ResponseTypeUrl) > 0 {
- dAtA[i] = 0x22
- i++
+ i -= len(m.ResponseTypeUrl)
+ copy(dAtA[i:], m.ResponseTypeUrl)
i = encodeVarintApi(dAtA, i, uint64(len(m.ResponseTypeUrl)))
- i += copy(dAtA[i:], m.ResponseTypeUrl)
+ i--
+ dAtA[i] = 0x22
}
- if m.ResponseStreaming {
- dAtA[i] = 0x28
- i++
- if m.ResponseStreaming {
+ if m.RequestStreaming {
+ i--
+ if m.RequestStreaming {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
- }
- if len(m.Options) > 0 {
- for _, msg := range m.Options {
- dAtA[i] = 0x32
- i++
- i = encodeVarintApi(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n
- }
+ i--
+ dAtA[i] = 0x18
}
- if m.Syntax != 0 {
- dAtA[i] = 0x38
- i++
- i = encodeVarintApi(dAtA, i, uint64(m.Syntax))
+ if len(m.RequestTypeUrl) > 0 {
+ i -= len(m.RequestTypeUrl)
+ copy(dAtA[i:], m.RequestTypeUrl)
+ i = encodeVarintApi(dAtA, i, uint64(len(m.RequestTypeUrl)))
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintApi(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *Mixin) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1030,48 +1058,58 @@ func (m *Mixin) Marshal() (dAtA []byte, err error) {
}
func (m *Mixin) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Mixin) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Name) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintApi(dAtA, i, uint64(len(m.Name)))
- i += copy(dAtA[i:], m.Name)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Root) > 0 {
- dAtA[i] = 0x12
- i++
+ i -= len(m.Root)
+ copy(dAtA[i:], m.Root)
i = encodeVarintApi(dAtA, i, uint64(len(m.Root)))
- i += copy(dAtA[i:], m.Root)
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintApi(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func encodeVarintApi(dAtA []byte, offset int, v uint64) int {
+ offset -= sovApi(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func NewPopulatedApi(r randyApi, easy bool) *Api {
this := &Api{}
this.Name = string(randStringApi(r))
- if r.Intn(10) != 0 {
+ if r.Intn(5) != 0 {
v1 := r.Intn(5)
this.Methods = make([]*Method, v1)
for i := 0; i < v1; i++ {
this.Methods[i] = NewPopulatedMethod(r, easy)
}
}
- if r.Intn(10) != 0 {
+ if r.Intn(5) != 0 {
v2 := r.Intn(5)
this.Options = make([]*Option, v2)
for i := 0; i < v2; i++ {
@@ -1079,10 +1117,10 @@ func NewPopulatedApi(r randyApi, easy bool) *Api {
}
}
this.Version = string(randStringApi(r))
- if r.Intn(10) != 0 {
+ if r.Intn(5) != 0 {
this.SourceContext = NewPopulatedSourceContext(r, easy)
}
- if r.Intn(10) != 0 {
+ if r.Intn(5) != 0 {
v3 := r.Intn(5)
this.Mixins = make([]*Mixin, v3)
for i := 0; i < v3; i++ {
@@ -1103,7 +1141,7 @@ func NewPopulatedMethod(r randyApi, easy bool) *Method {
this.RequestStreaming = bool(bool(r.Intn(2) == 0))
this.ResponseTypeUrl = string(randStringApi(r))
this.ResponseStreaming = bool(bool(r.Intn(2) == 0))
- if r.Intn(10) != 0 {
+ if r.Intn(5) != 0 {
v4 := r.Intn(5)
this.Options = make([]*Option, v4)
for i := 0; i < v4; i++ {
@@ -1304,14 +1342,7 @@ func (m *Mixin) Size() (n int) {
}
func sovApi(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozApi(x uint64) (n int) {
return sovApi(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -1320,13 +1351,28 @@ func (this *Api) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForMethods := "[]*Method{"
+ for _, f := range this.Methods {
+ repeatedStringForMethods += strings.Replace(f.String(), "Method", "Method", 1) + ","
+ }
+ repeatedStringForMethods += "}"
+ repeatedStringForOptions := "[]*Option{"
+ for _, f := range this.Options {
+ repeatedStringForOptions += strings.Replace(fmt.Sprintf("%v", f), "Option", "Option", 1) + ","
+ }
+ repeatedStringForOptions += "}"
+ repeatedStringForMixins := "[]*Mixin{"
+ for _, f := range this.Mixins {
+ repeatedStringForMixins += strings.Replace(f.String(), "Mixin", "Mixin", 1) + ","
+ }
+ repeatedStringForMixins += "}"
s := strings.Join([]string{`&Api{`,
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
- `Methods:` + strings.Replace(fmt.Sprintf("%v", this.Methods), "Method", "Method", 1) + `,`,
- `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Option", "Option", 1) + `,`,
+ `Methods:` + repeatedStringForMethods + `,`,
+ `Options:` + repeatedStringForOptions + `,`,
`Version:` + fmt.Sprintf("%v", this.Version) + `,`,
`SourceContext:` + strings.Replace(fmt.Sprintf("%v", this.SourceContext), "SourceContext", "SourceContext", 1) + `,`,
- `Mixins:` + strings.Replace(fmt.Sprintf("%v", this.Mixins), "Mixin", "Mixin", 1) + `,`,
+ `Mixins:` + repeatedStringForMixins + `,`,
`Syntax:` + fmt.Sprintf("%v", this.Syntax) + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
@@ -1337,13 +1383,18 @@ func (this *Method) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForOptions := "[]*Option{"
+ for _, f := range this.Options {
+ repeatedStringForOptions += strings.Replace(fmt.Sprintf("%v", f), "Option", "Option", 1) + ","
+ }
+ repeatedStringForOptions += "}"
s := strings.Join([]string{`&Method{`,
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
`RequestTypeUrl:` + fmt.Sprintf("%v", this.RequestTypeUrl) + `,`,
`RequestStreaming:` + fmt.Sprintf("%v", this.RequestStreaming) + `,`,
`ResponseTypeUrl:` + fmt.Sprintf("%v", this.ResponseTypeUrl) + `,`,
`ResponseStreaming:` + fmt.Sprintf("%v", this.ResponseStreaming) + `,`,
- `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Option", "Option", 1) + `,`,
+ `Options:` + repeatedStringForOptions + `,`,
`Syntax:` + fmt.Sprintf("%v", this.Syntax) + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
@@ -2009,6 +2060,7 @@ func (m *Mixin) Unmarshal(dAtA []byte) error {
func skipApi(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -2040,10 +2092,8 @@ func skipApi(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -2064,55 +2114,30 @@ func skipApi(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthApi
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthApi
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowApi
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipApi(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthApi
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupApi
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthApi
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowApi = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowApi = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupApi = fmt.Errorf("proto: unexpected end of group")
)
diff --git a/vendor/github.com/gogo/protobuf/types/duration.pb.go b/vendor/github.com/gogo/protobuf/types/duration.pb.go
index f328ee0e2..3959f0669 100644
--- a/vendor/github.com/gogo/protobuf/types/duration.pb.go
+++ b/vendor/github.com/gogo/protobuf/types/duration.pb.go
@@ -9,6 +9,7 @@ import (
proto "github.com/gogo/protobuf/proto"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
)
@@ -22,7 +23,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// A Duration represents a signed, fixed-length span of time represented
// as a count of seconds and fractions of seconds at nanosecond
@@ -115,7 +116,7 @@ func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Duration.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -272,7 +273,7 @@ func valueToGoStringDuration(v interface{}, typ string) string {
func (m *Duration) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -280,34 +281,42 @@ func (m *Duration) Marshal() (dAtA []byte, err error) {
}
func (m *Duration) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Duration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if m.Seconds != 0 {
- dAtA[i] = 0x8
- i++
- i = encodeVarintDuration(dAtA, i, uint64(m.Seconds))
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if m.Nanos != 0 {
- dAtA[i] = 0x10
- i++
i = encodeVarintDuration(dAtA, i, uint64(m.Nanos))
+ i--
+ dAtA[i] = 0x10
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if m.Seconds != 0 {
+ i = encodeVarintDuration(dAtA, i, uint64(m.Seconds))
+ i--
+ dAtA[i] = 0x8
}
- return i, nil
+ return len(dAtA) - i, nil
}
func encodeVarintDuration(dAtA []byte, offset int, v uint64) int {
+ offset -= sovDuration(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func (m *Duration) Size() (n int) {
if m == nil {
@@ -328,14 +337,7 @@ func (m *Duration) Size() (n int) {
}
func sovDuration(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozDuration(x uint64) (n int) {
return sovDuration(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -435,6 +437,7 @@ func (m *Duration) Unmarshal(dAtA []byte) error {
func skipDuration(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -466,10 +469,8 @@ func skipDuration(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -490,55 +491,30 @@ func skipDuration(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthDuration
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthDuration
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowDuration
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipDuration(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthDuration
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupDuration
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthDuration
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthDuration = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowDuration = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthDuration = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowDuration = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupDuration = fmt.Errorf("proto: unexpected end of group")
)
diff --git a/vendor/github.com/gogo/protobuf/types/empty.pb.go b/vendor/github.com/gogo/protobuf/types/empty.pb.go
index 85881878f..17e3aa558 100644
--- a/vendor/github.com/gogo/protobuf/types/empty.pb.go
+++ b/vendor/github.com/gogo/protobuf/types/empty.pb.go
@@ -9,6 +9,7 @@ import (
proto "github.com/gogo/protobuf/proto"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
)
@@ -22,7 +23,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// A generic empty message that you can re-use to avoid defining duplicated
// empty messages in your APIs. A typical example is to use it as the request
@@ -53,7 +54,7 @@ func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Empty.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -173,7 +174,7 @@ func valueToGoStringEmpty(v interface{}, typ string) string {
func (m *Empty) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -181,24 +182,32 @@ func (m *Empty) Marshal() (dAtA []byte, err error) {
}
func (m *Empty) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Empty) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- return i, nil
+ return len(dAtA) - i, nil
}
func encodeVarintEmpty(dAtA []byte, offset int, v uint64) int {
+ offset -= sovEmpty(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func NewPopulatedEmpty(r randyEmpty, easy bool) *Empty {
this := &Empty{}
@@ -293,14 +302,7 @@ func (m *Empty) Size() (n int) {
}
func sovEmpty(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozEmpty(x uint64) (n int) {
return sovEmpty(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -380,6 +382,7 @@ func (m *Empty) Unmarshal(dAtA []byte) error {
func skipEmpty(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -411,10 +414,8 @@ func skipEmpty(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -435,55 +436,30 @@ func skipEmpty(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthEmpty
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthEmpty
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowEmpty
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipEmpty(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthEmpty
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupEmpty
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthEmpty
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthEmpty = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowEmpty = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthEmpty = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowEmpty = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupEmpty = fmt.Errorf("proto: unexpected end of group")
)
diff --git a/vendor/github.com/gogo/protobuf/types/field_mask.pb.go b/vendor/github.com/gogo/protobuf/types/field_mask.pb.go
index b401a2b3f..7226b57f7 100644
--- a/vendor/github.com/gogo/protobuf/types/field_mask.pb.go
+++ b/vendor/github.com/gogo/protobuf/types/field_mask.pb.go
@@ -9,6 +9,7 @@ import (
proto "github.com/gogo/protobuf/proto"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
)
@@ -22,7 +23,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// `FieldMask` represents a set of symbolic field paths, for example:
//
@@ -244,7 +245,7 @@ func (m *FieldMask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_FieldMask.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -396,7 +397,7 @@ func valueToGoStringFieldMask(v interface{}, typ string) string {
func (m *FieldMask) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -404,39 +405,41 @@ func (m *FieldMask) Marshal() (dAtA []byte, err error) {
}
func (m *FieldMask) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *FieldMask) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Paths) > 0 {
- for _, s := range m.Paths {
+ for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Paths[iNdEx])
+ copy(dAtA[i:], m.Paths[iNdEx])
+ i = encodeVarintFieldMask(dAtA, i, uint64(len(m.Paths[iNdEx])))
+ i--
dAtA[i] = 0xa
- i++
- l = len(s)
- for l >= 1<<7 {
- dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
- l >>= 7
- i++
- }
- dAtA[i] = uint8(l)
- i++
- i += copy(dAtA[i:], s)
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func encodeVarintFieldMask(dAtA []byte, offset int, v uint64) int {
+ offset -= sovFieldMask(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func NewPopulatedFieldMask(r randyFieldMask, easy bool) *FieldMask {
this := &FieldMask{}
@@ -542,14 +545,7 @@ func (m *FieldMask) Size() (n int) {
}
func sovFieldMask(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozFieldMask(x uint64) (n int) {
return sovFieldMask(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -662,6 +658,7 @@ func (m *FieldMask) Unmarshal(dAtA []byte) error {
func skipFieldMask(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -693,10 +690,8 @@ func skipFieldMask(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -717,55 +712,30 @@ func skipFieldMask(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthFieldMask
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthFieldMask
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowFieldMask
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipFieldMask(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthFieldMask
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupFieldMask
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthFieldMask
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthFieldMask = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowFieldMask = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthFieldMask = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowFieldMask = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupFieldMask = fmt.Errorf("proto: unexpected end of group")
)
diff --git a/vendor/github.com/gogo/protobuf/types/source_context.pb.go b/vendor/github.com/gogo/protobuf/types/source_context.pb.go
index 3688840b5..61045ce10 100644
--- a/vendor/github.com/gogo/protobuf/types/source_context.pb.go
+++ b/vendor/github.com/gogo/protobuf/types/source_context.pb.go
@@ -9,6 +9,7 @@ import (
proto "github.com/gogo/protobuf/proto"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
)
@@ -22,7 +23,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// `SourceContext` represents information about the source of a
// protobuf element, like the file in which it is defined.
@@ -48,7 +49,7 @@ func (m *SourceContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error
return xxx_messageInfo_SourceContext.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -190,7 +191,7 @@ func valueToGoStringSourceContext(v interface{}, typ string) string {
func (m *SourceContext) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -198,30 +199,39 @@ func (m *SourceContext) Marshal() (dAtA []byte, err error) {
}
func (m *SourceContext) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SourceContext) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.FileName) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.FileName)
+ copy(dAtA[i:], m.FileName)
i = encodeVarintSourceContext(dAtA, i, uint64(len(m.FileName)))
- i += copy(dAtA[i:], m.FileName)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func encodeVarintSourceContext(dAtA []byte, offset int, v uint64) int {
+ offset -= sovSourceContext(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func NewPopulatedSourceContext(r randySourceContext, easy bool) *SourceContext {
this := &SourceContext{}
@@ -321,14 +331,7 @@ func (m *SourceContext) Size() (n int) {
}
func sovSourceContext(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozSourceContext(x uint64) (n int) {
return sovSourceContext(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -441,6 +444,7 @@ func (m *SourceContext) Unmarshal(dAtA []byte) error {
func skipSourceContext(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -472,10 +476,8 @@ func skipSourceContext(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -496,55 +498,30 @@ func skipSourceContext(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthSourceContext
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthSourceContext
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowSourceContext
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipSourceContext(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthSourceContext
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupSourceContext
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthSourceContext
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthSourceContext = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowSourceContext = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthSourceContext = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowSourceContext = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupSourceContext = fmt.Errorf("proto: unexpected end of group")
)
diff --git a/vendor/github.com/gogo/protobuf/types/struct.pb.go b/vendor/github.com/gogo/protobuf/types/struct.pb.go
index 63fd17b03..cea553eef 100644
--- a/vendor/github.com/gogo/protobuf/types/struct.pb.go
+++ b/vendor/github.com/gogo/protobuf/types/struct.pb.go
@@ -11,6 +11,7 @@ import (
github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strconv "strconv"
strings "strings"
@@ -25,7 +26,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// `NullValue` is a singleton enumeration to represent the null value for the
// `Value` type union.
@@ -82,7 +83,7 @@ func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Struct.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -148,7 +149,7 @@ func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Value.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -172,25 +173,26 @@ type isValue_Kind interface {
Equal(interface{}) bool
MarshalTo([]byte) (int, error)
Size() int
+ Compare(interface{}) int
}
type Value_NullValue struct {
- NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
+ NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof" json:"null_value,omitempty"`
}
type Value_NumberValue struct {
- NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"`
+ NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof" json:"number_value,omitempty"`
}
type Value_StringValue struct {
- StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"`
+ StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof" json:"string_value,omitempty"`
}
type Value_BoolValue struct {
- BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"`
+ BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof" json:"bool_value,omitempty"`
}
type Value_StructValue struct {
- StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"`
+ StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof" json:"struct_value,omitempty"`
}
type Value_ListValue struct {
- ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"`
+ ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof" json:"list_value,omitempty"`
}
func (*Value_NullValue) isValue_Kind() {}
@@ -249,9 +251,9 @@ func (m *Value) GetListValue() *ListValue {
return nil
}
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
- return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*Value) XXX_OneofWrappers() []interface{} {
+ return []interface{}{
(*Value_NullValue)(nil),
(*Value_NumberValue)(nil),
(*Value_StringValue)(nil),
@@ -261,129 +263,6 @@ func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error,
}
}
-func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
- m := msg.(*Value)
- // kind
- switch x := m.Kind.(type) {
- case *Value_NullValue:
- _ = b.EncodeVarint(1<<3 | proto.WireVarint)
- _ = b.EncodeVarint(uint64(x.NullValue))
- case *Value_NumberValue:
- _ = b.EncodeVarint(2<<3 | proto.WireFixed64)
- _ = b.EncodeFixed64(math.Float64bits(x.NumberValue))
- case *Value_StringValue:
- _ = b.EncodeVarint(3<<3 | proto.WireBytes)
- _ = b.EncodeStringBytes(x.StringValue)
- case *Value_BoolValue:
- t := uint64(0)
- if x.BoolValue {
- t = 1
- }
- _ = b.EncodeVarint(4<<3 | proto.WireVarint)
- _ = b.EncodeVarint(t)
- case *Value_StructValue:
- _ = b.EncodeVarint(5<<3 | proto.WireBytes)
- if err := b.EncodeMessage(x.StructValue); err != nil {
- return err
- }
- case *Value_ListValue:
- _ = b.EncodeVarint(6<<3 | proto.WireBytes)
- if err := b.EncodeMessage(x.ListValue); err != nil {
- return err
- }
- case nil:
- default:
- return fmt.Errorf("Value.Kind has unexpected type %T", x)
- }
- return nil
-}
-
-func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
- m := msg.(*Value)
- switch tag {
- case 1: // kind.null_value
- if wire != proto.WireVarint {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeVarint()
- m.Kind = &Value_NullValue{NullValue(x)}
- return true, err
- case 2: // kind.number_value
- if wire != proto.WireFixed64 {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeFixed64()
- m.Kind = &Value_NumberValue{math.Float64frombits(x)}
- return true, err
- case 3: // kind.string_value
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeStringBytes()
- m.Kind = &Value_StringValue{x}
- return true, err
- case 4: // kind.bool_value
- if wire != proto.WireVarint {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeVarint()
- m.Kind = &Value_BoolValue{x != 0}
- return true, err
- case 5: // kind.struct_value
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- msg := new(Struct)
- err := b.DecodeMessage(msg)
- m.Kind = &Value_StructValue{msg}
- return true, err
- case 6: // kind.list_value
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- msg := new(ListValue)
- err := b.DecodeMessage(msg)
- m.Kind = &Value_ListValue{msg}
- return true, err
- default:
- return false, nil
- }
-}
-
-func _Value_OneofSizer(msg proto.Message) (n int) {
- m := msg.(*Value)
- // kind
- switch x := m.Kind.(type) {
- case *Value_NullValue:
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(x.NullValue))
- case *Value_NumberValue:
- n += 1 // tag and wire
- n += 8
- case *Value_StringValue:
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(len(x.StringValue)))
- n += len(x.StringValue)
- case *Value_BoolValue:
- n += 1 // tag and wire
- n += 1
- case *Value_StructValue:
- s := proto.Size(x.StructValue)
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(s))
- n += s
- case *Value_ListValue:
- s := proto.Size(x.ListValue)
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(s))
- n += s
- case nil:
- default:
- panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
- }
- return n
-}
-
func (*Value) XXX_MessageName() string {
return "google.protobuf.Value"
}
@@ -413,7 +292,7 @@ func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ListValue.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -453,37 +332,392 @@ func init() {
func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_df322afd6c9fb402) }
var fileDescriptor_df322afd6c9fb402 = []byte{
- // 439 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xc1, 0x6b, 0xd4, 0x40,
- 0x14, 0xc6, 0xf3, 0xb2, 0xdd, 0xe0, 0xbe, 0x48, 0x2d, 0x23, 0xe8, 0x52, 0x65, 0x5c, 0xb6, 0x97,
- 0x45, 0x24, 0x85, 0xf5, 0x22, 0xae, 0x17, 0x17, 0x6a, 0x0b, 0x86, 0x12, 0xa3, 0xad, 0xe0, 0x65,
- 0x31, 0x69, 0xba, 0x84, 0x4e, 0x67, 0x4a, 0x32, 0xa3, 0xec, 0x4d, 0xff, 0x0b, 0xcf, 0x9e, 0xc4,
- 0xa3, 0x7f, 0x85, 0x47, 0x8f, 0x1e, 0xdd, 0x78, 0xf1, 0xd8, 0x63, 0x8f, 0x32, 0x33, 0x49, 0x94,
- 0x2e, 0xbd, 0xe5, 0x7d, 0xf3, 0x7b, 0xdf, 0x7b, 0xdf, 0x0b, 0xde, 0x9d, 0x0b, 0x31, 0x67, 0xd9,
- 0xf6, 0x59, 0x21, 0xa4, 0x48, 0xd4, 0xf1, 0x76, 0x29, 0x0b, 0x95, 0xca, 0xc0, 0xd4, 0xe4, 0x86,
- 0x7d, 0x0d, 0x9a, 0xd7, 0xe1, 0x27, 0x40, 0xef, 0xa5, 0x21, 0xc8, 0x04, 0xbd, 0xe3, 0x3c, 0x63,
- 0x47, 0x65, 0x1f, 0x06, 0x9d, 0x91, 0x3f, 0xde, 0x0a, 0x2e, 0xc1, 0x81, 0x05, 0x83, 0x67, 0x86,
- 0xda, 0xe1, 0xb2, 0x58, 0xc4, 0x75, 0xcb, 0xe6, 0x0b, 0xf4, 0xff, 0x93, 0xc9, 0x06, 0x76, 0x4e,
- 0xb2, 0x45, 0x1f, 0x06, 0x30, 0xea, 0xc5, 0xfa, 0x93, 0x3c, 0xc0, 0xee, 0xbb, 0xb7, 0x4c, 0x65,
- 0x7d, 0x77, 0x00, 0x23, 0x7f, 0x7c, 0x6b, 0xc5, 0xfc, 0x50, 0xbf, 0xc6, 0x16, 0x7a, 0xec, 0x3e,
- 0x82, 0xe1, 0x37, 0x17, 0xbb, 0x46, 0x24, 0x13, 0x44, 0xae, 0x18, 0x9b, 0x59, 0x03, 0x6d, 0xba,
- 0x3e, 0xde, 0x5c, 0x31, 0xd8, 0x57, 0x8c, 0x19, 0x7e, 0xcf, 0x89, 0x7b, 0xbc, 0x29, 0xc8, 0x16,
- 0x5e, 0xe7, 0xea, 0x34, 0xc9, 0x8a, 0xd9, 0xbf, 0xf9, 0xb0, 0xe7, 0xc4, 0xbe, 0x55, 0x5b, 0xa8,
- 0x94, 0x45, 0xce, 0xe7, 0x35, 0xd4, 0xd1, 0x8b, 0x6b, 0xc8, 0xaa, 0x16, 0xba, 0x87, 0x98, 0x08,
- 0xd1, 0xac, 0xb1, 0x36, 0x80, 0xd1, 0x35, 0x3d, 0x4a, 0x6b, 0x16, 0x78, 0x62, 0x5c, 0x54, 0x2a,
- 0x6b, 0xa4, 0x6b, 0xa2, 0xde, 0xbe, 0xe2, 0x8e, 0xb5, 0xbd, 0x4a, 0x65, 0x9b, 0x92, 0xe5, 0x65,
- 0xd3, 0xeb, 0x99, 0xde, 0xd5, 0x94, 0x61, 0x5e, 0xca, 0x36, 0x25, 0x6b, 0x8a, 0xa9, 0x87, 0x6b,
- 0x27, 0x39, 0x3f, 0x1a, 0x4e, 0xb0, 0xd7, 0x12, 0x24, 0x40, 0xcf, 0x98, 0x35, 0x7f, 0xf4, 0xaa,
- 0xa3, 0xd7, 0xd4, 0xfd, 0x3b, 0xd8, 0x6b, 0x8f, 0x48, 0xd6, 0x11, 0xf7, 0x0f, 0xc2, 0x70, 0x76,
- 0xf8, 0x34, 0x3c, 0xd8, 0xd9, 0x70, 0xa6, 0x1f, 0xe1, 0xe7, 0x92, 0x3a, 0xe7, 0x4b, 0x0a, 0x17,
- 0x4b, 0x0a, 0x1f, 0x2a, 0x0a, 0x5f, 0x2a, 0x0a, 0xdf, 0x2b, 0x0a, 0x3f, 0x2a, 0x0a, 0xbf, 0x2a,
- 0x0a, 0x7f, 0x2a, 0xea, 0x9c, 0x6b, 0xed, 0x37, 0x05, 0xbc, 0x99, 0x8a, 0xd3, 0xcb, 0xe3, 0xa6,
- 0xbe, 0x4d, 0x1e, 0xe9, 0x3a, 0x82, 0x37, 0x5d, 0xb9, 0x38, 0xcb, 0xca, 0x0b, 0x80, 0xcf, 0x6e,
- 0x67, 0x37, 0x9a, 0x7e, 0x75, 0xe9, 0xae, 0x6d, 0x88, 0x9a, 0xfd, 0x5e, 0x67, 0x8c, 0x3d, 0xe7,
- 0xe2, 0x3d, 0x7f, 0xa5, 0xc9, 0xc4, 0x33, 0x4e, 0x0f, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xad,
- 0x84, 0x08, 0xae, 0xe5, 0x02, 0x00, 0x00,
+ // 443 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xb1, 0x6f, 0xd3, 0x40,
+ 0x14, 0xc6, 0xfd, 0x9c, 0xc6, 0x22, 0xcf, 0xa8, 0x54, 0x87, 0x04, 0x51, 0x41, 0x47, 0x94, 0x2e,
+ 0x11, 0x42, 0xae, 0x14, 0x16, 0x44, 0x58, 0x88, 0x54, 0x5a, 0x89, 0xa8, 0x32, 0x86, 0x16, 0x89,
+ 0x25, 0xc2, 0xae, 0x1b, 0x59, 0xbd, 0xde, 0x55, 0xf6, 0x1d, 0x28, 0x1b, 0x0b, 0xff, 0x03, 0x33,
+ 0x13, 0x62, 0xe4, 0xaf, 0xe8, 0xc8, 0xc8, 0x48, 0xdc, 0x85, 0xb1, 0x63, 0x47, 0x74, 0x77, 0xb6,
+ 0x41, 0x8d, 0xb2, 0xf9, 0x7d, 0xf7, 0x7b, 0xdf, 0x7b, 0xdf, 0x33, 0xde, 0x9f, 0x09, 0x31, 0x63,
+ 0xe9, 0xf6, 0x59, 0x2e, 0xa4, 0x88, 0xd5, 0xf1, 0x76, 0x21, 0x73, 0x95, 0xc8, 0xc0, 0xd4, 0xe4,
+ 0x96, 0x7d, 0x0d, 0xea, 0xd7, 0xfe, 0x17, 0x40, 0xef, 0xb5, 0x21, 0xc8, 0x08, 0xbd, 0xe3, 0x2c,
+ 0x65, 0x47, 0x45, 0x17, 0x7a, 0xad, 0x81, 0x3f, 0xdc, 0x0a, 0xae, 0xc1, 0x81, 0x05, 0x83, 0x17,
+ 0x86, 0xda, 0xe1, 0x32, 0x9f, 0x47, 0x55, 0xcb, 0xe6, 0x2b, 0xf4, 0xff, 0x93, 0xc9, 0x06, 0xb6,
+ 0x4e, 0xd2, 0x79, 0x17, 0x7a, 0x30, 0xe8, 0x44, 0xfa, 0x93, 0x3c, 0xc2, 0xf6, 0x87, 0xf7, 0x4c,
+ 0xa5, 0x5d, 0xb7, 0x07, 0x03, 0x7f, 0x78, 0x67, 0xc9, 0xfc, 0x50, 0xbf, 0x46, 0x16, 0x7a, 0xea,
+ 0x3e, 0x81, 0xfe, 0x0f, 0x17, 0xdb, 0x46, 0x24, 0x23, 0x44, 0xae, 0x18, 0x9b, 0x5a, 0x03, 0x6d,
+ 0xba, 0x3e, 0xdc, 0x5c, 0x32, 0xd8, 0x57, 0x8c, 0x19, 0x7e, 0xcf, 0x89, 0x3a, 0xbc, 0x2e, 0xc8,
+ 0x16, 0xde, 0xe4, 0xea, 0x34, 0x4e, 0xf3, 0xe9, 0xbf, 0xf9, 0xb0, 0xe7, 0x44, 0xbe, 0x55, 0x1b,
+ 0xa8, 0x90, 0x79, 0xc6, 0x67, 0x15, 0xd4, 0xd2, 0x8b, 0x6b, 0xc8, 0xaa, 0x16, 0x7a, 0x80, 0x18,
+ 0x0b, 0x51, 0xaf, 0xb1, 0xd6, 0x83, 0xc1, 0x0d, 0x3d, 0x4a, 0x6b, 0x16, 0x78, 0x66, 0x5c, 0x54,
+ 0x22, 0x2b, 0xa4, 0x6d, 0xa2, 0xde, 0x5d, 0x71, 0xc7, 0xca, 0x5e, 0x25, 0xb2, 0x49, 0xc9, 0xb2,
+ 0xa2, 0xee, 0xf5, 0x4c, 0xef, 0x72, 0xca, 0x49, 0x56, 0xc8, 0x26, 0x25, 0xab, 0x8b, 0xb1, 0x87,
+ 0x6b, 0x27, 0x19, 0x3f, 0xea, 0x8f, 0xb0, 0xd3, 0x10, 0x24, 0x40, 0xcf, 0x98, 0xd5, 0x7f, 0x74,
+ 0xd5, 0xd1, 0x2b, 0xea, 0xe1, 0x3d, 0xec, 0x34, 0x47, 0x24, 0xeb, 0x88, 0xfb, 0x07, 0x93, 0xc9,
+ 0xf4, 0xf0, 0xf9, 0xe4, 0x60, 0x67, 0xc3, 0x19, 0x7f, 0x86, 0x5f, 0x0b, 0xea, 0x5c, 0x2e, 0x28,
+ 0x5c, 0x2d, 0x28, 0x7c, 0x2a, 0x29, 0x7c, 0x2b, 0x29, 0x9c, 0x97, 0x14, 0x7e, 0x96, 0x14, 0x7e,
+ 0x97, 0x14, 0xfe, 0x94, 0xd4, 0xb9, 0xd4, 0xfa, 0x05, 0x85, 0xf3, 0x0b, 0x0a, 0x78, 0x3b, 0x11,
+ 0xa7, 0xd7, 0x47, 0x8e, 0x7d, 0x9b, 0x3e, 0xd4, 0x75, 0x08, 0xef, 0xda, 0x72, 0x7e, 0x96, 0x16,
+ 0x57, 0x00, 0x5f, 0xdd, 0xd6, 0x6e, 0x38, 0xfe, 0xee, 0xd2, 0x5d, 0xdb, 0x10, 0xd6, 0x3b, 0xbe,
+ 0x4d, 0x19, 0x7b, 0xc9, 0xc5, 0x47, 0xfe, 0x46, 0x93, 0xb1, 0x67, 0x9c, 0x1e, 0xff, 0x0d, 0x00,
+ 0x00, 0xff, 0xff, 0x26, 0x30, 0xdb, 0xbe, 0xe9, 0x02, 0x00, 0x00,
+}
+
+func (this *Struct) Compare(that interface{}) int {
+ if that == nil {
+ if this == nil {
+ return 0
+ }
+ return 1
+ }
+
+ that1, ok := that.(*Struct)
+ if !ok {
+ that2, ok := that.(Struct)
+ if ok {
+ that1 = &that2
+ } else {
+ return 1
+ }
+ }
+ if that1 == nil {
+ if this == nil {
+ return 0
+ }
+ return 1
+ } else if this == nil {
+ return -1
+ }
+ if len(this.Fields) != len(that1.Fields) {
+ if len(this.Fields) < len(that1.Fields) {
+ return -1
+ }
+ return 1
+ }
+ for i := range this.Fields {
+ if c := this.Fields[i].Compare(that1.Fields[i]); c != 0 {
+ return c
+ }
+ }
+ if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
+ return c
+ }
+ return 0
+}
+func (this *Value) Compare(that interface{}) int {
+ if that == nil {
+ if this == nil {
+ return 0
+ }
+ return 1
+ }
+
+ that1, ok := that.(*Value)
+ if !ok {
+ that2, ok := that.(Value)
+ if ok {
+ that1 = &that2
+ } else {
+ return 1
+ }
+ }
+ if that1 == nil {
+ if this == nil {
+ return 0
+ }
+ return 1
+ } else if this == nil {
+ return -1
+ }
+ if that1.Kind == nil {
+ if this.Kind != nil {
+ return 1
+ }
+ } else if this.Kind == nil {
+ return -1
+ } else {
+ thisType := -1
+ switch this.Kind.(type) {
+ case *Value_NullValue:
+ thisType = 0
+ case *Value_NumberValue:
+ thisType = 1
+ case *Value_StringValue:
+ thisType = 2
+ case *Value_BoolValue:
+ thisType = 3
+ case *Value_StructValue:
+ thisType = 4
+ case *Value_ListValue:
+ thisType = 5
+ default:
+ panic(fmt.Sprintf("compare: unexpected type %T in oneof", this.Kind))
+ }
+ that1Type := -1
+ switch that1.Kind.(type) {
+ case *Value_NullValue:
+ that1Type = 0
+ case *Value_NumberValue:
+ that1Type = 1
+ case *Value_StringValue:
+ that1Type = 2
+ case *Value_BoolValue:
+ that1Type = 3
+ case *Value_StructValue:
+ that1Type = 4
+ case *Value_ListValue:
+ that1Type = 5
+ default:
+ panic(fmt.Sprintf("compare: unexpected type %T in oneof", that1.Kind))
+ }
+ if thisType == that1Type {
+ if c := this.Kind.Compare(that1.Kind); c != 0 {
+ return c
+ }
+ } else if thisType < that1Type {
+ return -1
+ } else if thisType > that1Type {
+ return 1
+ }
+ }
+ if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
+ return c
+ }
+ return 0
+}
+func (this *Value_NullValue) Compare(that interface{}) int {
+ if that == nil {
+ if this == nil {
+ return 0
+ }
+ return 1
+ }
+
+ that1, ok := that.(*Value_NullValue)
+ if !ok {
+ that2, ok := that.(Value_NullValue)
+ if ok {
+ that1 = &that2
+ } else {
+ return 1
+ }
+ }
+ if that1 == nil {
+ if this == nil {
+ return 0
+ }
+ return 1
+ } else if this == nil {
+ return -1
+ }
+ if this.NullValue != that1.NullValue {
+ if this.NullValue < that1.NullValue {
+ return -1
+ }
+ return 1
+ }
+ return 0
+}
+func (this *Value_NumberValue) Compare(that interface{}) int {
+ if that == nil {
+ if this == nil {
+ return 0
+ }
+ return 1
+ }
+
+ that1, ok := that.(*Value_NumberValue)
+ if !ok {
+ that2, ok := that.(Value_NumberValue)
+ if ok {
+ that1 = &that2
+ } else {
+ return 1
+ }
+ }
+ if that1 == nil {
+ if this == nil {
+ return 0
+ }
+ return 1
+ } else if this == nil {
+ return -1
+ }
+ if this.NumberValue != that1.NumberValue {
+ if this.NumberValue < that1.NumberValue {
+ return -1
+ }
+ return 1
+ }
+ return 0
+}
+func (this *Value_StringValue) Compare(that interface{}) int {
+ if that == nil {
+ if this == nil {
+ return 0
+ }
+ return 1
+ }
+
+ that1, ok := that.(*Value_StringValue)
+ if !ok {
+ that2, ok := that.(Value_StringValue)
+ if ok {
+ that1 = &that2
+ } else {
+ return 1
+ }
+ }
+ if that1 == nil {
+ if this == nil {
+ return 0
+ }
+ return 1
+ } else if this == nil {
+ return -1
+ }
+ if this.StringValue != that1.StringValue {
+ if this.StringValue < that1.StringValue {
+ return -1
+ }
+ return 1
+ }
+ return 0
+}
+func (this *Value_BoolValue) Compare(that interface{}) int {
+ if that == nil {
+ if this == nil {
+ return 0
+ }
+ return 1
+ }
+
+ that1, ok := that.(*Value_BoolValue)
+ if !ok {
+ that2, ok := that.(Value_BoolValue)
+ if ok {
+ that1 = &that2
+ } else {
+ return 1
+ }
+ }
+ if that1 == nil {
+ if this == nil {
+ return 0
+ }
+ return 1
+ } else if this == nil {
+ return -1
+ }
+ if this.BoolValue != that1.BoolValue {
+ if !this.BoolValue {
+ return -1
+ }
+ return 1
+ }
+ return 0
+}
+func (this *Value_StructValue) Compare(that interface{}) int {
+ if that == nil {
+ if this == nil {
+ return 0
+ }
+ return 1
+ }
+
+ that1, ok := that.(*Value_StructValue)
+ if !ok {
+ that2, ok := that.(Value_StructValue)
+ if ok {
+ that1 = &that2
+ } else {
+ return 1
+ }
+ }
+ if that1 == nil {
+ if this == nil {
+ return 0
+ }
+ return 1
+ } else if this == nil {
+ return -1
+ }
+ if c := this.StructValue.Compare(that1.StructValue); c != 0 {
+ return c
+ }
+ return 0
}
+func (this *Value_ListValue) Compare(that interface{}) int {
+ if that == nil {
+ if this == nil {
+ return 0
+ }
+ return 1
+ }
+ that1, ok := that.(*Value_ListValue)
+ if !ok {
+ that2, ok := that.(Value_ListValue)
+ if ok {
+ that1 = &that2
+ } else {
+ return 1
+ }
+ }
+ if that1 == nil {
+ if this == nil {
+ return 0
+ }
+ return 1
+ } else if this == nil {
+ return -1
+ }
+ if c := this.ListValue.Compare(that1.ListValue); c != 0 {
+ return c
+ }
+ return 0
+}
+func (this *ListValue) Compare(that interface{}) int {
+ if that == nil {
+ if this == nil {
+ return 0
+ }
+ return 1
+ }
+
+ that1, ok := that.(*ListValue)
+ if !ok {
+ that2, ok := that.(ListValue)
+ if ok {
+ that1 = &that2
+ } else {
+ return 1
+ }
+ }
+ if that1 == nil {
+ if this == nil {
+ return 0
+ }
+ return 1
+ } else if this == nil {
+ return -1
+ }
+ if len(this.Values) != len(that1.Values) {
+ if len(this.Values) < len(that1.Values) {
+ return -1
+ }
+ return 1
+ }
+ for i := range this.Values {
+ if c := this.Values[i].Compare(that1.Values[i]); c != 0 {
+ return c
+ }
+ }
+ if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
+ return c
+ }
+ return 0
+}
func (x NullValue) String() string {
s, ok := NullValue_name[int32(x)]
if ok {
@@ -846,7 +1080,7 @@ func valueToGoStringStruct(v interface{}, typ string) string {
func (m *Struct) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -854,48 +1088,52 @@ func (m *Struct) Marshal() (dAtA []byte, err error) {
}
func (m *Struct) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Struct) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Fields) > 0 {
for k := range m.Fields {
- dAtA[i] = 0xa
- i++
v := m.Fields[k]
- msgSize := 0
- if v != nil {
- msgSize = v.Size()
- msgSize += 1 + sovStruct(uint64(msgSize))
- }
- mapSize := 1 + len(k) + sovStruct(uint64(len(k))) + msgSize
- i = encodeVarintStruct(dAtA, i, uint64(mapSize))
- dAtA[i] = 0xa
- i++
- i = encodeVarintStruct(dAtA, i, uint64(len(k)))
- i += copy(dAtA[i:], k)
+ baseI := i
if v != nil {
- dAtA[i] = 0x12
- i++
- i = encodeVarintStruct(dAtA, i, uint64(v.Size()))
- n1, err := v.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ {
+ size, err := v.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintStruct(dAtA, i, uint64(size))
}
- i += n1
+ i--
+ dAtA[i] = 0x12
}
+ i -= len(k)
+ copy(dAtA[i:], k)
+ i = encodeVarintStruct(dAtA, i, uint64(len(k)))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintStruct(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0xa
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *Value) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -903,90 +1141,133 @@ func (m *Value) Marshal() (dAtA []byte, err error) {
}
func (m *Value) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Value) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if m.Kind != nil {
- nn2, err := m.Kind.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ {
+ size := m.Kind.Size()
+ i -= size
+ if _, err := m.Kind.MarshalTo(dAtA[i:]); err != nil {
+ return 0, err
+ }
}
- i += nn2
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *Value_NullValue) MarshalTo(dAtA []byte) (int, error) {
- i := 0
- dAtA[i] = 0x8
- i++
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Value_NullValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
i = encodeVarintStruct(dAtA, i, uint64(m.NullValue))
- return i, nil
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
}
func (m *Value_NumberValue) MarshalTo(dAtA []byte) (int, error) {
- i := 0
- dAtA[i] = 0x11
- i++
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Value_NumberValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.NumberValue))))
- i += 8
- return i, nil
+ i--
+ dAtA[i] = 0x11
+ return len(dAtA) - i, nil
}
func (m *Value_StringValue) MarshalTo(dAtA []byte) (int, error) {
- i := 0
- dAtA[i] = 0x1a
- i++
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Value_StringValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ i -= len(m.StringValue)
+ copy(dAtA[i:], m.StringValue)
i = encodeVarintStruct(dAtA, i, uint64(len(m.StringValue)))
- i += copy(dAtA[i:], m.StringValue)
- return i, nil
+ i--
+ dAtA[i] = 0x1a
+ return len(dAtA) - i, nil
}
func (m *Value_BoolValue) MarshalTo(dAtA []byte) (int, error) {
- i := 0
- dAtA[i] = 0x20
- i++
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Value_BoolValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ i--
if m.BoolValue {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
- return i, nil
+ i--
+ dAtA[i] = 0x20
+ return len(dAtA) - i, nil
}
func (m *Value_StructValue) MarshalTo(dAtA []byte) (int, error) {
- i := 0
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Value_StructValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
if m.StructValue != nil {
- dAtA[i] = 0x2a
- i++
- i = encodeVarintStruct(dAtA, i, uint64(m.StructValue.Size()))
- n3, err := m.StructValue.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ {
+ size, err := m.StructValue.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintStruct(dAtA, i, uint64(size))
}
- i += n3
+ i--
+ dAtA[i] = 0x2a
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *Value_ListValue) MarshalTo(dAtA []byte) (int, error) {
- i := 0
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Value_ListValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
if m.ListValue != nil {
- dAtA[i] = 0x32
- i++
- i = encodeVarintStruct(dAtA, i, uint64(m.ListValue.Size()))
- n4, err := m.ListValue.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ {
+ size, err := m.ListValue.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintStruct(dAtA, i, uint64(size))
}
- i += n4
+ i--
+ dAtA[i] = 0x32
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *ListValue) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -994,40 +1275,50 @@ func (m *ListValue) Marshal() (dAtA []byte, err error) {
}
func (m *ListValue) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ListValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Values) > 0 {
- for _, msg := range m.Values {
- dAtA[i] = 0xa
- i++
- i = encodeVarintStruct(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Values[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintStruct(dAtA, i, uint64(size))
}
- i += n
+ i--
+ dAtA[i] = 0xa
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func encodeVarintStruct(dAtA []byte, offset int, v uint64) int {
+ offset -= sovStruct(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func NewPopulatedStruct(r randyStruct, easy bool) *Struct {
this := &Struct{}
- if r.Intn(10) == 0 {
+ if r.Intn(5) == 0 {
v1 := r.Intn(10)
this.Fields = make(map[string]*Value)
for i := 0; i < v1; i++ {
@@ -1098,7 +1389,7 @@ func NewPopulatedValue_ListValue(r randyStruct, easy bool) *Value_ListValue {
}
func NewPopulatedListValue(r randyStruct, easy bool) *ListValue {
this := &ListValue{}
- if r.Intn(10) == 0 {
+ if r.Intn(5) == 0 {
v2 := r.Intn(5)
this.Values = make([]*Value, v2)
for i := 0; i < v2; i++ {
@@ -1303,14 +1594,7 @@ func (m *ListValue) Size() (n int) {
}
func sovStruct(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozStruct(x uint64) (n int) {
return sovStruct(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -1411,8 +1695,13 @@ func (this *ListValue) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForValues := "[]*Value{"
+ for _, f := range this.Values {
+ repeatedStringForValues += strings.Replace(f.String(), "Value", "Value", 1) + ","
+ }
+ repeatedStringForValues += "}"
s := strings.Join([]string{`&ListValue{`,
- `Values:` + strings.Replace(fmt.Sprintf("%v", this.Values), "Value", "Value", 1) + `,`,
+ `Values:` + repeatedStringForValues + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -1908,6 +2197,7 @@ func (m *ListValue) Unmarshal(dAtA []byte) error {
func skipStruct(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -1939,10 +2229,8 @@ func skipStruct(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -1963,55 +2251,30 @@ func skipStruct(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthStruct
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthStruct
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowStruct
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipStruct(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthStruct
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupStruct
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthStruct
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthStruct = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowStruct = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthStruct = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowStruct = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupStruct = fmt.Errorf("proto: unexpected end of group")
)
diff --git a/vendor/github.com/gogo/protobuf/types/timestamp.pb.go b/vendor/github.com/gogo/protobuf/types/timestamp.pb.go
index 3ee6cb0a1..b81875267 100644
--- a/vendor/github.com/gogo/protobuf/types/timestamp.pb.go
+++ b/vendor/github.com/gogo/protobuf/types/timestamp.pb.go
@@ -9,6 +9,7 @@ import (
proto "github.com/gogo/protobuf/proto"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
)
@@ -22,7 +23,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// A Timestamp represents a point in time independent of any time zone or local
// calendar, encoded as a count of seconds and fractions of seconds at
@@ -97,11 +98,13 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
// 01:30 UTC on January 15, 2017.
//
// In JavaScript, one can convert a Date object to this format using the
-// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
+// standard
+// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
// method. In Python, a standard `datetime.datetime` object can be converted
-// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
-// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
-// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
+// to this format using
+// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with
+// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use
+// the Joda Time's [`ISODateTimeFormat.dateTime()`](
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D
// ) to obtain a formatter capable of generating timestamps in this format.
//
@@ -135,7 +138,7 @@ func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -292,7 +295,7 @@ func valueToGoStringTimestamp(v interface{}, typ string) string {
func (m *Timestamp) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -300,34 +303,42 @@ func (m *Timestamp) Marshal() (dAtA []byte, err error) {
}
func (m *Timestamp) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Timestamp) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if m.Seconds != 0 {
- dAtA[i] = 0x8
- i++
- i = encodeVarintTimestamp(dAtA, i, uint64(m.Seconds))
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if m.Nanos != 0 {
- dAtA[i] = 0x10
- i++
i = encodeVarintTimestamp(dAtA, i, uint64(m.Nanos))
+ i--
+ dAtA[i] = 0x10
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if m.Seconds != 0 {
+ i = encodeVarintTimestamp(dAtA, i, uint64(m.Seconds))
+ i--
+ dAtA[i] = 0x8
}
- return i, nil
+ return len(dAtA) - i, nil
}
func encodeVarintTimestamp(dAtA []byte, offset int, v uint64) int {
+ offset -= sovTimestamp(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func (m *Timestamp) Size() (n int) {
if m == nil {
@@ -348,14 +359,7 @@ func (m *Timestamp) Size() (n int) {
}
func sovTimestamp(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozTimestamp(x uint64) (n int) {
return sovTimestamp(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -455,6 +459,7 @@ func (m *Timestamp) Unmarshal(dAtA []byte) error {
func skipTimestamp(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -486,10 +491,8 @@ func skipTimestamp(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -510,55 +513,30 @@ func skipTimestamp(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthTimestamp
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthTimestamp
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowTimestamp
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipTimestamp(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthTimestamp
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupTimestamp
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthTimestamp
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthTimestamp = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowTimestamp = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthTimestamp = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowTimestamp = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupTimestamp = fmt.Errorf("proto: unexpected end of group")
)
diff --git a/vendor/github.com/gogo/protobuf/types/type.pb.go b/vendor/github.com/gogo/protobuf/types/type.pb.go
index 366f493d2..13b7ec02f 100644
--- a/vendor/github.com/gogo/protobuf/types/type.pb.go
+++ b/vendor/github.com/gogo/protobuf/types/type.pb.go
@@ -9,6 +9,7 @@ import (
proto "github.com/gogo/protobuf/proto"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strconv "strconv"
strings "strings"
@@ -23,7 +24,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// The syntax in which a protocol buffer element is defined.
type Syntax int32
@@ -205,7 +206,7 @@ func (m *Type) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Type.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -312,7 +313,7 @@ func (m *Field) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Field.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -435,7 +436,7 @@ func (m *Enum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Enum.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -519,7 +520,7 @@ func (m *EnumValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_EnumValue.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -594,7 +595,7 @@ func (m *Option) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Option.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -1404,7 +1405,7 @@ func valueToGoStringType(v interface{}, typ string) string {
func (m *Type) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1412,80 +1413,87 @@ func (m *Type) Marshal() (dAtA []byte, err error) {
}
func (m *Type) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Type) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Name) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintType(dAtA, i, uint64(len(m.Name)))
- i += copy(dAtA[i:], m.Name)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- if len(m.Fields) > 0 {
- for _, msg := range m.Fields {
- dAtA[i] = 0x12
- i++
- i = encodeVarintType(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
+ if m.Syntax != 0 {
+ i = encodeVarintType(dAtA, i, uint64(m.Syntax))
+ i--
+ dAtA[i] = 0x30
+ }
+ if m.SourceContext != nil {
+ {
+ size, err := m.SourceContext.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
- i += n
- }
- }
- if len(m.Oneofs) > 0 {
- for _, s := range m.Oneofs {
- dAtA[i] = 0x1a
- i++
- l = len(s)
- for l >= 1<<7 {
- dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
- l >>= 7
- i++
- }
- dAtA[i] = uint8(l)
- i++
- i += copy(dAtA[i:], s)
+ i -= size
+ i = encodeVarintType(dAtA, i, uint64(size))
}
+ i--
+ dAtA[i] = 0x2a
}
if len(m.Options) > 0 {
- for _, msg := range m.Options {
- dAtA[i] = 0x22
- i++
- i = encodeVarintType(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintType(dAtA, i, uint64(size))
}
- i += n
+ i--
+ dAtA[i] = 0x22
}
}
- if m.SourceContext != nil {
- dAtA[i] = 0x2a
- i++
- i = encodeVarintType(dAtA, i, uint64(m.SourceContext.Size()))
- n1, err := m.SourceContext.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ if len(m.Oneofs) > 0 {
+ for iNdEx := len(m.Oneofs) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Oneofs[iNdEx])
+ copy(dAtA[i:], m.Oneofs[iNdEx])
+ i = encodeVarintType(dAtA, i, uint64(len(m.Oneofs[iNdEx])))
+ i--
+ dAtA[i] = 0x1a
}
- i += n1
}
- if m.Syntax != 0 {
- dAtA[i] = 0x30
- i++
- i = encodeVarintType(dAtA, i, uint64(m.Syntax))
+ if len(m.Fields) > 0 {
+ for iNdEx := len(m.Fields) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Fields[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintType(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintType(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *Field) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1493,86 +1501,98 @@ func (m *Field) Marshal() (dAtA []byte, err error) {
}
func (m *Field) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Field) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if m.Kind != 0 {
- dAtA[i] = 0x8
- i++
- i = encodeVarintType(dAtA, i, uint64(m.Kind))
- }
- if m.Cardinality != 0 {
- dAtA[i] = 0x10
- i++
- i = encodeVarintType(dAtA, i, uint64(m.Cardinality))
- }
- if m.Number != 0 {
- dAtA[i] = 0x18
- i++
- i = encodeVarintType(dAtA, i, uint64(m.Number))
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- if len(m.Name) > 0 {
- dAtA[i] = 0x22
- i++
- i = encodeVarintType(dAtA, i, uint64(len(m.Name)))
- i += copy(dAtA[i:], m.Name)
+ if len(m.DefaultValue) > 0 {
+ i -= len(m.DefaultValue)
+ copy(dAtA[i:], m.DefaultValue)
+ i = encodeVarintType(dAtA, i, uint64(len(m.DefaultValue)))
+ i--
+ dAtA[i] = 0x5a
}
- if len(m.TypeUrl) > 0 {
- dAtA[i] = 0x32
- i++
- i = encodeVarintType(dAtA, i, uint64(len(m.TypeUrl)))
- i += copy(dAtA[i:], m.TypeUrl)
+ if len(m.JsonName) > 0 {
+ i -= len(m.JsonName)
+ copy(dAtA[i:], m.JsonName)
+ i = encodeVarintType(dAtA, i, uint64(len(m.JsonName)))
+ i--
+ dAtA[i] = 0x52
}
- if m.OneofIndex != 0 {
- dAtA[i] = 0x38
- i++
- i = encodeVarintType(dAtA, i, uint64(m.OneofIndex))
+ if len(m.Options) > 0 {
+ for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintType(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x4a
+ }
}
if m.Packed {
- dAtA[i] = 0x40
- i++
+ i--
if m.Packed {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
+ i--
+ dAtA[i] = 0x40
}
- if len(m.Options) > 0 {
- for _, msg := range m.Options {
- dAtA[i] = 0x4a
- i++
- i = encodeVarintType(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n
- }
+ if m.OneofIndex != 0 {
+ i = encodeVarintType(dAtA, i, uint64(m.OneofIndex))
+ i--
+ dAtA[i] = 0x38
}
- if len(m.JsonName) > 0 {
- dAtA[i] = 0x52
- i++
- i = encodeVarintType(dAtA, i, uint64(len(m.JsonName)))
- i += copy(dAtA[i:], m.JsonName)
+ if len(m.TypeUrl) > 0 {
+ i -= len(m.TypeUrl)
+ copy(dAtA[i:], m.TypeUrl)
+ i = encodeVarintType(dAtA, i, uint64(len(m.TypeUrl)))
+ i--
+ dAtA[i] = 0x32
}
- if len(m.DefaultValue) > 0 {
- dAtA[i] = 0x5a
- i++
- i = encodeVarintType(dAtA, i, uint64(len(m.DefaultValue)))
- i += copy(dAtA[i:], m.DefaultValue)
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintType(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0x22
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if m.Number != 0 {
+ i = encodeVarintType(dAtA, i, uint64(m.Number))
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.Cardinality != 0 {
+ i = encodeVarintType(dAtA, i, uint64(m.Cardinality))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Kind != 0 {
+ i = encodeVarintType(dAtA, i, uint64(m.Kind))
+ i--
+ dAtA[i] = 0x8
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *Enum) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1580,65 +1600,78 @@ func (m *Enum) Marshal() (dAtA []byte, err error) {
}
func (m *Enum) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Enum) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Name) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintType(dAtA, i, uint64(len(m.Name)))
- i += copy(dAtA[i:], m.Name)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- if len(m.Enumvalue) > 0 {
- for _, msg := range m.Enumvalue {
- dAtA[i] = 0x12
- i++
- i = encodeVarintType(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
+ if m.Syntax != 0 {
+ i = encodeVarintType(dAtA, i, uint64(m.Syntax))
+ i--
+ dAtA[i] = 0x28
+ }
+ if m.SourceContext != nil {
+ {
+ size, err := m.SourceContext.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
- i += n
+ i -= size
+ i = encodeVarintType(dAtA, i, uint64(size))
}
+ i--
+ dAtA[i] = 0x22
}
if len(m.Options) > 0 {
- for _, msg := range m.Options {
- dAtA[i] = 0x1a
- i++
- i = encodeVarintType(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintType(dAtA, i, uint64(size))
}
- i += n
+ i--
+ dAtA[i] = 0x1a
}
}
- if m.SourceContext != nil {
- dAtA[i] = 0x22
- i++
- i = encodeVarintType(dAtA, i, uint64(m.SourceContext.Size()))
- n2, err := m.SourceContext.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ if len(m.Enumvalue) > 0 {
+ for iNdEx := len(m.Enumvalue) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Enumvalue[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintType(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
}
- i += n2
}
- if m.Syntax != 0 {
- dAtA[i] = 0x28
- i++
- i = encodeVarintType(dAtA, i, uint64(m.Syntax))
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintType(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *EnumValue) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1646,43 +1679,52 @@ func (m *EnumValue) Marshal() (dAtA []byte, err error) {
}
func (m *EnumValue) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EnumValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Name) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintType(dAtA, i, uint64(len(m.Name)))
- i += copy(dAtA[i:], m.Name)
- }
- if m.Number != 0 {
- dAtA[i] = 0x10
- i++
- i = encodeVarintType(dAtA, i, uint64(m.Number))
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Options) > 0 {
- for _, msg := range m.Options {
- dAtA[i] = 0x1a
- i++
- i = encodeVarintType(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintType(dAtA, i, uint64(size))
}
- i += n
+ i--
+ dAtA[i] = 0x1a
}
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if m.Number != 0 {
+ i = encodeVarintType(dAtA, i, uint64(m.Number))
+ i--
+ dAtA[i] = 0x10
}
- return i, nil
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintType(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
}
func (m *Option) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1690,45 +1732,56 @@ func (m *Option) Marshal() (dAtA []byte, err error) {
}
func (m *Option) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Option) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Name) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintType(dAtA, i, uint64(len(m.Name)))
- i += copy(dAtA[i:], m.Name)
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
if m.Value != nil {
- dAtA[i] = 0x12
- i++
- i = encodeVarintType(dAtA, i, uint64(m.Value.Size()))
- n3, err := m.Value.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ {
+ size, err := m.Value.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintType(dAtA, i, uint64(size))
}
- i += n3
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintType(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func encodeVarintType(dAtA []byte, offset int, v uint64) int {
+ offset -= sovType(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func NewPopulatedType(r randyType, easy bool) *Type {
this := &Type{}
this.Name = string(randStringType(r))
- if r.Intn(10) != 0 {
+ if r.Intn(5) != 0 {
v1 := r.Intn(5)
this.Fields = make([]*Field, v1)
for i := 0; i < v1; i++ {
@@ -1740,14 +1793,14 @@ func NewPopulatedType(r randyType, easy bool) *Type {
for i := 0; i < v2; i++ {
this.Oneofs[i] = string(randStringType(r))
}
- if r.Intn(10) != 0 {
+ if r.Intn(5) != 0 {
v3 := r.Intn(5)
this.Options = make([]*Option, v3)
for i := 0; i < v3; i++ {
this.Options[i] = NewPopulatedOption(r, easy)
}
}
- if r.Intn(10) != 0 {
+ if r.Intn(5) != 0 {
this.SourceContext = NewPopulatedSourceContext(r, easy)
}
this.Syntax = Syntax([]int32{0, 1}[r.Intn(2)])
@@ -1772,7 +1825,7 @@ func NewPopulatedField(r randyType, easy bool) *Field {
this.OneofIndex *= -1
}
this.Packed = bool(bool(r.Intn(2) == 0))
- if r.Intn(10) != 0 {
+ if r.Intn(5) != 0 {
v4 := r.Intn(5)
this.Options = make([]*Option, v4)
for i := 0; i < v4; i++ {
@@ -1790,21 +1843,21 @@ func NewPopulatedField(r randyType, easy bool) *Field {
func NewPopulatedEnum(r randyType, easy bool) *Enum {
this := &Enum{}
this.Name = string(randStringType(r))
- if r.Intn(10) != 0 {
+ if r.Intn(5) != 0 {
v5 := r.Intn(5)
this.Enumvalue = make([]*EnumValue, v5)
for i := 0; i < v5; i++ {
this.Enumvalue[i] = NewPopulatedEnumValue(r, easy)
}
}
- if r.Intn(10) != 0 {
+ if r.Intn(5) != 0 {
v6 := r.Intn(5)
this.Options = make([]*Option, v6)
for i := 0; i < v6; i++ {
this.Options[i] = NewPopulatedOption(r, easy)
}
}
- if r.Intn(10) != 0 {
+ if r.Intn(5) != 0 {
this.SourceContext = NewPopulatedSourceContext(r, easy)
}
this.Syntax = Syntax([]int32{0, 1}[r.Intn(2)])
@@ -1821,7 +1874,7 @@ func NewPopulatedEnumValue(r randyType, easy bool) *EnumValue {
if r.Intn(2) == 0 {
this.Number *= -1
}
- if r.Intn(10) != 0 {
+ if r.Intn(5) != 0 {
v7 := r.Intn(5)
this.Options = make([]*Option, v7)
for i := 0; i < v7; i++ {
@@ -1837,7 +1890,7 @@ func NewPopulatedEnumValue(r randyType, easy bool) *EnumValue {
func NewPopulatedOption(r randyType, easy bool) *Option {
this := &Option{}
this.Name = string(randStringType(r))
- if r.Intn(10) != 0 {
+ if r.Intn(5) != 0 {
this.Value = NewPopulatedAny(r, easy)
}
if !easy && r.Intn(10) != 0 {
@@ -2089,14 +2142,7 @@ func (m *Option) Size() (n int) {
}
func sovType(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozType(x uint64) (n int) {
return sovType(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -2105,11 +2151,21 @@ func (this *Type) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForFields := "[]*Field{"
+ for _, f := range this.Fields {
+ repeatedStringForFields += strings.Replace(f.String(), "Field", "Field", 1) + ","
+ }
+ repeatedStringForFields += "}"
+ repeatedStringForOptions := "[]*Option{"
+ for _, f := range this.Options {
+ repeatedStringForOptions += strings.Replace(f.String(), "Option", "Option", 1) + ","
+ }
+ repeatedStringForOptions += "}"
s := strings.Join([]string{`&Type{`,
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
- `Fields:` + strings.Replace(fmt.Sprintf("%v", this.Fields), "Field", "Field", 1) + `,`,
+ `Fields:` + repeatedStringForFields + `,`,
`Oneofs:` + fmt.Sprintf("%v", this.Oneofs) + `,`,
- `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Option", "Option", 1) + `,`,
+ `Options:` + repeatedStringForOptions + `,`,
`SourceContext:` + strings.Replace(fmt.Sprintf("%v", this.SourceContext), "SourceContext", "SourceContext", 1) + `,`,
`Syntax:` + fmt.Sprintf("%v", this.Syntax) + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
@@ -2121,6 +2177,11 @@ func (this *Field) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForOptions := "[]*Option{"
+ for _, f := range this.Options {
+ repeatedStringForOptions += strings.Replace(f.String(), "Option", "Option", 1) + ","
+ }
+ repeatedStringForOptions += "}"
s := strings.Join([]string{`&Field{`,
`Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
`Cardinality:` + fmt.Sprintf("%v", this.Cardinality) + `,`,
@@ -2129,7 +2190,7 @@ func (this *Field) String() string {
`TypeUrl:` + fmt.Sprintf("%v", this.TypeUrl) + `,`,
`OneofIndex:` + fmt.Sprintf("%v", this.OneofIndex) + `,`,
`Packed:` + fmt.Sprintf("%v", this.Packed) + `,`,
- `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Option", "Option", 1) + `,`,
+ `Options:` + repeatedStringForOptions + `,`,
`JsonName:` + fmt.Sprintf("%v", this.JsonName) + `,`,
`DefaultValue:` + fmt.Sprintf("%v", this.DefaultValue) + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
@@ -2141,10 +2202,20 @@ func (this *Enum) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForEnumvalue := "[]*EnumValue{"
+ for _, f := range this.Enumvalue {
+ repeatedStringForEnumvalue += strings.Replace(f.String(), "EnumValue", "EnumValue", 1) + ","
+ }
+ repeatedStringForEnumvalue += "}"
+ repeatedStringForOptions := "[]*Option{"
+ for _, f := range this.Options {
+ repeatedStringForOptions += strings.Replace(f.String(), "Option", "Option", 1) + ","
+ }
+ repeatedStringForOptions += "}"
s := strings.Join([]string{`&Enum{`,
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
- `Enumvalue:` + strings.Replace(fmt.Sprintf("%v", this.Enumvalue), "EnumValue", "EnumValue", 1) + `,`,
- `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Option", "Option", 1) + `,`,
+ `Enumvalue:` + repeatedStringForEnumvalue + `,`,
+ `Options:` + repeatedStringForOptions + `,`,
`SourceContext:` + strings.Replace(fmt.Sprintf("%v", this.SourceContext), "SourceContext", "SourceContext", 1) + `,`,
`Syntax:` + fmt.Sprintf("%v", this.Syntax) + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
@@ -2156,10 +2227,15 @@ func (this *EnumValue) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForOptions := "[]*Option{"
+ for _, f := range this.Options {
+ repeatedStringForOptions += strings.Replace(f.String(), "Option", "Option", 1) + ","
+ }
+ repeatedStringForOptions += "}"
s := strings.Join([]string{`&EnumValue{`,
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
`Number:` + fmt.Sprintf("%v", this.Number) + `,`,
- `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Option", "Option", 1) + `,`,
+ `Options:` + repeatedStringForOptions + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
@@ -3211,6 +3287,7 @@ func (m *Option) Unmarshal(dAtA []byte) error {
func skipType(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -3242,10 +3319,8 @@ func skipType(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -3266,55 +3341,30 @@ func skipType(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthType
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthType
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowType
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipType(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthType
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupType
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthType
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthType = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowType = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthType = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowType = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupType = fmt.Errorf("proto: unexpected end of group")
)
diff --git a/vendor/github.com/gogo/protobuf/types/wrappers.pb.go b/vendor/github.com/gogo/protobuf/types/wrappers.pb.go
index 5ade933ef..8f1edb57d 100644
--- a/vendor/github.com/gogo/protobuf/types/wrappers.pb.go
+++ b/vendor/github.com/gogo/protobuf/types/wrappers.pb.go
@@ -10,6 +10,7 @@ import (
proto "github.com/gogo/protobuf/proto"
io "io"
math "math"
+ math_bits "math/bits"
reflect "reflect"
strings "strings"
)
@@ -23,7 +24,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// Wrapper message for `double`.
//
@@ -50,7 +51,7 @@ func (m *DoubleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_DoubleValue.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -105,7 +106,7 @@ func (m *FloatValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_FloatValue.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -160,7 +161,7 @@ func (m *Int64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Int64Value.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -215,7 +216,7 @@ func (m *UInt64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_UInt64Value.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -270,7 +271,7 @@ func (m *Int32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Int32Value.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -325,7 +326,7 @@ func (m *UInt32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_UInt32Value.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -380,7 +381,7 @@ func (m *BoolValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_BoolValue.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -435,7 +436,7 @@ func (m *StringValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return xxx_messageInfo_StringValue.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -490,7 +491,7 @@ func (m *BytesValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_BytesValue.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
- n, err := m.MarshalTo(b)
+ n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@@ -1247,7 +1248,7 @@ func valueToGoStringWrappers(v interface{}, typ string) string {
func (m *DoubleValue) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1255,26 +1256,32 @@ func (m *DoubleValue) Marshal() (dAtA []byte, err error) {
}
func (m *DoubleValue) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DoubleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if m.Value != 0 {
- dAtA[i] = 0x9
- i++
+ i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value))))
- i += 8
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0x9
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *FloatValue) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1282,26 +1289,32 @@ func (m *FloatValue) Marshal() (dAtA []byte, err error) {
}
func (m *FloatValue) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *FloatValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if m.Value != 0 {
- dAtA[i] = 0xd
- i++
+ i -= 4
encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.Value))))
- i += 4
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xd
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *Int64Value) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1309,25 +1322,31 @@ func (m *Int64Value) Marshal() (dAtA []byte, err error) {
}
func (m *Int64Value) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Int64Value) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if m.Value != 0 {
- dAtA[i] = 0x8
- i++
i = encodeVarintWrappers(dAtA, i, uint64(m.Value))
+ i--
+ dAtA[i] = 0x8
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *UInt64Value) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1335,25 +1354,31 @@ func (m *UInt64Value) Marshal() (dAtA []byte, err error) {
}
func (m *UInt64Value) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UInt64Value) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if m.Value != 0 {
- dAtA[i] = 0x8
- i++
i = encodeVarintWrappers(dAtA, i, uint64(m.Value))
+ i--
+ dAtA[i] = 0x8
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *Int32Value) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1361,25 +1386,31 @@ func (m *Int32Value) Marshal() (dAtA []byte, err error) {
}
func (m *Int32Value) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Int32Value) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if m.Value != 0 {
- dAtA[i] = 0x8
- i++
i = encodeVarintWrappers(dAtA, i, uint64(m.Value))
+ i--
+ dAtA[i] = 0x8
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *UInt32Value) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1387,25 +1418,31 @@ func (m *UInt32Value) Marshal() (dAtA []byte, err error) {
}
func (m *UInt32Value) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UInt32Value) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if m.Value != 0 {
- dAtA[i] = 0x8
- i++
i = encodeVarintWrappers(dAtA, i, uint64(m.Value))
+ i--
+ dAtA[i] = 0x8
}
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *BoolValue) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1413,30 +1450,36 @@ func (m *BoolValue) Marshal() (dAtA []byte, err error) {
}
func (m *BoolValue) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BoolValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if m.Value {
- dAtA[i] = 0x8
- i++
+ i--
if m.Value {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
- i++
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0x8
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *StringValue) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1444,26 +1487,33 @@ func (m *StringValue) Marshal() (dAtA []byte, err error) {
}
func (m *StringValue) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StringValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Value) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.Value)
+ copy(dAtA[i:], m.Value)
i = encodeVarintWrappers(dAtA, i, uint64(len(m.Value)))
- i += copy(dAtA[i:], m.Value)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func (m *BytesValue) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -1471,30 +1521,39 @@ func (m *BytesValue) Marshal() (dAtA []byte, err error) {
}
func (m *BytesValue) MarshalTo(dAtA []byte) (int, error) {
- var i int
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BytesValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
_ = i
var l int
_ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
if len(m.Value) > 0 {
- dAtA[i] = 0xa
- i++
+ i -= len(m.Value)
+ copy(dAtA[i:], m.Value)
i = encodeVarintWrappers(dAtA, i, uint64(len(m.Value)))
- i += copy(dAtA[i:], m.Value)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
+ i--
+ dAtA[i] = 0xa
}
- return i, nil
+ return len(dAtA) - i, nil
}
func encodeVarintWrappers(dAtA []byte, offset int, v uint64) int {
+ offset -= sovWrappers(v)
+ base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
- return offset + 1
+ return base
}
func NewPopulatedDoubleValue(r randyWrappers, easy bool) *DoubleValue {
this := &DoubleValue{}
@@ -1803,14 +1862,7 @@ func (m *BytesValue) Size() (n int) {
}
func sovWrappers(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ return (math_bits.Len64(x|1) + 6) / 7
}
func sozWrappers(x uint64) (n int) {
return sovWrappers(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -2595,6 +2647,7 @@ func (m *BytesValue) Unmarshal(dAtA []byte) error {
func skipWrappers(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
+ depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@@ -2626,10 +2679,8 @@ func skipWrappers(dAtA []byte) (n int, err error) {
break
}
}
- return iNdEx, nil
case 1:
iNdEx += 8
- return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@@ -2650,55 +2701,30 @@ func skipWrappers(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthWrappers
}
iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthWrappers
- }
- return iNdEx, nil
case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowWrappers
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipWrappers(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthWrappers
- }
- }
- return iNdEx, nil
+ depth++
case 4:
- return iNdEx, nil
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupWrappers
+ }
+ depth--
case 5:
iNdEx += 4
- return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthWrappers
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
}
- panic("unreachable")
+ return 0, io.ErrUnexpectedEOF
}
var (
- ErrInvalidLengthWrappers = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowWrappers = fmt.Errorf("proto: integer overflow")
+ ErrInvalidLengthWrappers = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowWrappers = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupWrappers = fmt.Errorf("proto: unexpected end of group")
)
diff --git a/vendor/github.com/golang/protobuf/README.md b/vendor/github.com/golang/protobuf/README.md
index 61820bed6..2e253e6f8 100644
--- a/vendor/github.com/golang/protobuf/README.md
+++ b/vendor/github.com/golang/protobuf/README.md
@@ -7,7 +7,7 @@ Google's data interchange format.
Copyright 2010 The Go Authors.
https://github.com/golang/protobuf
-This package and the code it generates requires at least Go 1.6.
+This package and the code it generates requires at least Go 1.9.
This software implements Go bindings for protocol buffers. For
information about protocol buffers themselves, see
@@ -24,11 +24,22 @@ To use this software, you must:
https://golang.org/doc/install
for details or, if you are using gccgo, follow the instructions at
https://golang.org/doc/install/gccgo
-- Grab the code from the repository and install the proto package.
- The simplest way is to run `go get -u github.com/golang/protobuf/protoc-gen-go`.
- The compiler plugin, protoc-gen-go, will be installed in $GOBIN,
- defaulting to $GOPATH/bin. It must be in your $PATH for the protocol
- compiler, protoc, to find it.
+- Grab the code from the repository and install the `proto` package.
+ The simplest way is to run:
+ ```
+ go get -u github.com/golang/protobuf/protoc-gen-go
+ ```
+ The compiler plugin, `protoc-gen-go`, will be installed in `$GOPATH/bin`
+ unless `$GOBIN` is set. It must be in your `$PATH` for the protocol
+ compiler, `protoc`, to find it.
+- If you need a particular version of `protoc-gen-go` (e.g., to match your
+ `proto` package version), one option is
+ ```shell
+ GIT_TAG="v1.2.0" # change as needed
+ go get -d -u github.com/golang/protobuf/protoc-gen-go
+ git -C "$(go env GOPATH)"/src/github.com/golang/protobuf checkout $GIT_TAG
+ go install github.com/golang/protobuf/protoc-gen-go
+ ```
This software has two parts: a 'protocol compiler plugin' that
generates Go source files that, once compiled, can access and manage
diff --git a/vendor/github.com/golang/protobuf/go.mod b/vendor/github.com/golang/protobuf/go.mod
new file mode 100644
index 000000000..de28f6f0a
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/go.mod
@@ -0,0 +1,3 @@
+module github.com/golang/protobuf
+
+go 1.12
diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go
index d9aa3c42d..63b0f08be 100644
--- a/vendor/github.com/golang/protobuf/proto/decode.go
+++ b/vendor/github.com/golang/protobuf/proto/decode.go
@@ -186,7 +186,6 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) {
if b&0x80 == 0 {
goto done
}
- // x -= 0x80 << 63 // Always zero.
return 0, errOverflow
diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go
new file mode 100644
index 000000000..35b882c09
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/deprecated.go
@@ -0,0 +1,63 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2018 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import "errors"
+
+// Deprecated: do not use.
+type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
+
+// Deprecated: do not use.
+func GetStats() Stats { return Stats{} }
+
+// Deprecated: do not use.
+func MarshalMessageSet(interface{}) ([]byte, error) {
+ return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func UnmarshalMessageSet([]byte, interface{}) error {
+ return errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func MarshalMessageSetJSON(interface{}) ([]byte, error) {
+ return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func UnmarshalMessageSetJSON([]byte, interface{}) error {
+ return errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func RegisterMessageSetType(Message, int32, string) {}
diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go
index d4db5a1c1..f9b6e41b3 100644
--- a/vendor/github.com/golang/protobuf/proto/equal.go
+++ b/vendor/github.com/golang/protobuf/proto/equal.go
@@ -246,7 +246,8 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
return false
}
- m1, m2 := e1.value, e2.value
+ m1 := extensionAsLegacyType(e1.value)
+ m2 := extensionAsLegacyType(e2.value)
if m1 == nil && m2 == nil {
// Both have only encoded form.
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
index 816a3b9d6..fa88add30 100644
--- a/vendor/github.com/golang/protobuf/proto/extensions.go
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -185,9 +185,25 @@ type Extension struct {
// extension will have only enc set. When such an extension is
// accessed using GetExtension (or GetExtensions) desc and value
// will be set.
- desc *ExtensionDesc
+ desc *ExtensionDesc
+
+ // value is a concrete value for the extension field. Let the type of
+ // desc.ExtensionType be the "API type" and the type of Extension.value
+ // be the "storage type". The API type and storage type are the same except:
+ // * For scalars (except []byte), the API type uses *T,
+ // while the storage type uses T.
+ // * For repeated fields, the API type uses []T, while the storage type
+ // uses *[]T.
+ //
+ // The reason for the divergence is so that the storage type more naturally
+ // matches what is expected when retrieving the values through the
+ // protobuf reflection APIs.
+ //
+ // The value may only be populated if desc is also populated.
value interface{}
- enc []byte
+
+ // enc is the raw bytes for the extension field.
+ enc []byte
}
// SetRawExtension is for testing only.
@@ -334,7 +350,7 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
// descriptors with the same field number.
return nil, errors.New("proto: descriptor conflict")
}
- return e.value, nil
+ return extensionAsLegacyType(e.value), nil
}
if extension.ExtensionType == nil {
@@ -349,11 +365,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
// Remember the decoded version and drop the encoded version.
// That way it is safe to mutate what we return.
- e.value = v
+ e.value = extensionAsStorageType(v)
e.desc = extension
e.enc = nil
emap[extension.Field] = e
- return e.value, nil
+ return extensionAsLegacyType(e.value), nil
}
// defaultExtensionValue returns the default value for extension.
@@ -488,7 +504,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error
}
typ := reflect.TypeOf(extension.ExtensionType)
if typ != reflect.TypeOf(value) {
- return errors.New("proto: bad extension value type")
+ return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType)
}
// nil extension values need to be caught early, because the
// encoder can't distinguish an ErrNil due to a nil extension
@@ -500,7 +516,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error
}
extmap := epb.extensionsWrite()
- extmap[extension.Field] = Extension{desc: extension, value: value}
+ extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)}
return nil
}
@@ -541,3 +557,51 @@ func RegisterExtension(desc *ExtensionDesc) {
func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
return extensionMaps[reflect.TypeOf(pb).Elem()]
}
+
+// extensionAsLegacyType converts a value from the storage type to the API type.
+// See Extension.value.
+func extensionAsLegacyType(v interface{}) interface{} {
+ switch rv := reflect.ValueOf(v); rv.Kind() {
+ case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
+ // Represent primitive types as a pointer to the value.
+ rv2 := reflect.New(rv.Type())
+ rv2.Elem().Set(rv)
+ v = rv2.Interface()
+ case reflect.Ptr:
+ // Represent slice types as the value itself.
+ switch rv.Type().Elem().Kind() {
+ case reflect.Slice:
+ if rv.IsNil() {
+ v = reflect.Zero(rv.Type().Elem()).Interface()
+ } else {
+ v = rv.Elem().Interface()
+ }
+ }
+ }
+ return v
+}
+
+// extensionAsStorageType converts a value from the API type to the storage type.
+// See Extension.value.
+func extensionAsStorageType(v interface{}) interface{} {
+ switch rv := reflect.ValueOf(v); rv.Kind() {
+ case reflect.Ptr:
+ // Represent pointer-to-scalar types as the value itself.
+ switch rv.Type().Elem().Kind() {
+ case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
+ if rv.IsNil() {
+ v = reflect.Zero(rv.Type().Elem()).Interface()
+ } else {
+ v = rv.Elem().Interface()
+ }
+ }
+ case reflect.Slice:
+ // Represent slice types as a pointer to the value.
+ if rv.Type().Elem().Kind() != reflect.Uint8 {
+ rv2 := reflect.New(rv.Type())
+ rv2.Elem().Set(rv)
+ v = rv2.Interface()
+ }
+ }
+ return v
+}
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
index 75565cc6d..70fbda532 100644
--- a/vendor/github.com/golang/protobuf/proto/lib.go
+++ b/vendor/github.com/golang/protobuf/proto/lib.go
@@ -341,26 +341,6 @@ type Message interface {
ProtoMessage()
}
-// Stats records allocation details about the protocol buffer encoders
-// and decoders. Useful for tuning the library itself.
-type Stats struct {
- Emalloc uint64 // mallocs in encode
- Dmalloc uint64 // mallocs in decode
- Encode uint64 // number of encodes
- Decode uint64 // number of decodes
- Chit uint64 // number of cache hits
- Cmiss uint64 // number of cache misses
- Size uint64 // number of sizes
-}
-
-// Set to true to enable stats collection.
-const collectStats = false
-
-var stats Stats
-
-// GetStats returns a copy of the global Stats structure.
-func GetStats() Stats { return stats }
-
// A Buffer is a buffer manager for marshaling and unmarshaling
// protocol buffers. It may be reused between invocations to
// reduce memory usage. It is not necessary to use a Buffer;
@@ -413,7 +393,7 @@ func (p *Buffer) Bytes() []byte { return p.buf }
// than relying on this API.
//
// If deterministic serialization is requested, map entries will be sorted
-// by keys in lexographical order. This is an implementation detail and
+// by keys in lexicographical order. This is an implementation detail and
// subject to change.
func (p *Buffer) SetDeterministic(deterministic bool) {
p.deterministic = deterministic
@@ -960,13 +940,19 @@ func isProto3Zero(v reflect.Value) bool {
return false
}
-// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
-// to assert that that code is compatible with this version of the proto package.
-const ProtoPackageIsVersion2 = true
+const (
+ // ProtoPackageIsVersion3 is referenced from generated protocol buffer files
+ // to assert that that code is compatible with this version of the proto package.
+ ProtoPackageIsVersion3 = true
+
+ // ProtoPackageIsVersion2 is referenced from generated protocol buffer files
+ // to assert that that code is compatible with this version of the proto package.
+ ProtoPackageIsVersion2 = true
-// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
-// to assert that that code is compatible with this version of the proto package.
-const ProtoPackageIsVersion1 = true
+ // ProtoPackageIsVersion1 is referenced from generated protocol buffer files
+ // to assert that that code is compatible with this version of the proto package.
+ ProtoPackageIsVersion1 = true
+)
// InternalMessageInfo is a type used internally by generated .pb.go files.
// This type is not intended to be used by non-generated code.
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
index 3b6ca41d5..f48a75676 100644
--- a/vendor/github.com/golang/protobuf/proto/message_set.go
+++ b/vendor/github.com/golang/protobuf/proto/message_set.go
@@ -36,13 +36,7 @@ package proto
*/
import (
- "bytes"
- "encoding/json"
"errors"
- "fmt"
- "reflect"
- "sort"
- "sync"
)
// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
@@ -145,46 +139,9 @@ func skipVarint(buf []byte) []byte {
return buf[i+1:]
}
-// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
-// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
-func MarshalMessageSet(exts interface{}) ([]byte, error) {
- return marshalMessageSet(exts, false)
-}
-
-// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal.
-func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) {
- switch exts := exts.(type) {
- case *XXX_InternalExtensions:
- var u marshalInfo
- siz := u.sizeMessageSet(exts)
- b := make([]byte, 0, siz)
- return u.appendMessageSet(b, exts, deterministic)
-
- case map[int32]Extension:
- // This is an old-style extension map.
- // Wrap it in a new-style XXX_InternalExtensions.
- ie := XXX_InternalExtensions{
- p: &struct {
- mu sync.Mutex
- extensionMap map[int32]Extension
- }{
- extensionMap: exts,
- },
- }
-
- var u marshalInfo
- siz := u.sizeMessageSet(&ie)
- b := make([]byte, 0, siz)
- return u.appendMessageSet(b, &ie, deterministic)
-
- default:
- return nil, errors.New("proto: not an extension map")
- }
-}
-
-// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
-func UnmarshalMessageSet(buf []byte, exts interface{}) error {
+func unmarshalMessageSet(buf []byte, exts interface{}) error {
var m map[int32]Extension
switch exts := exts.(type) {
case *XXX_InternalExtensions:
@@ -222,93 +179,3 @@ func UnmarshalMessageSet(buf []byte, exts interface{}) error {
}
return nil
}
-
-// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
-// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
-func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
- var m map[int32]Extension
- switch exts := exts.(type) {
- case *XXX_InternalExtensions:
- var mu sync.Locker
- m, mu = exts.extensionsRead()
- if m != nil {
- // Keep the extensions map locked until we're done marshaling to prevent
- // races between marshaling and unmarshaling the lazily-{en,de}coded
- // values.
- mu.Lock()
- defer mu.Unlock()
- }
- case map[int32]Extension:
- m = exts
- default:
- return nil, errors.New("proto: not an extension map")
- }
- var b bytes.Buffer
- b.WriteByte('{')
-
- // Process the map in key order for deterministic output.
- ids := make([]int32, 0, len(m))
- for id := range m {
- ids = append(ids, id)
- }
- sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
-
- for i, id := range ids {
- ext := m[id]
- msd, ok := messageSetMap[id]
- if !ok {
- // Unknown type; we can't render it, so skip it.
- continue
- }
-
- if i > 0 && b.Len() > 1 {
- b.WriteByte(',')
- }
-
- fmt.Fprintf(&b, `"[%s]":`, msd.name)
-
- x := ext.value
- if x == nil {
- x = reflect.New(msd.t.Elem()).Interface()
- if err := Unmarshal(ext.enc, x.(Message)); err != nil {
- return nil, err
- }
- }
- d, err := json.Marshal(x)
- if err != nil {
- return nil, err
- }
- b.Write(d)
- }
- b.WriteByte('}')
- return b.Bytes(), nil
-}
-
-// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
-// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
-func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
- // Common-case fast path.
- if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
- return nil
- }
-
- // This is fairly tricky, and it's not clear that it is needed.
- return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
-}
-
-// A global registry of types that can be used in a MessageSet.
-
-var messageSetMap = make(map[int32]messageSetDesc)
-
-type messageSetDesc struct {
- t reflect.Type // pointer to struct
- name string
-}
-
-// RegisterMessageSetType is called from the generated code.
-func RegisterMessageSetType(m Message, fieldNum int32, name string) {
- messageSetMap[fieldNum] = messageSetDesc{
- t: reflect.TypeOf(m),
- name: name,
- }
-}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
index b6cad9083..94fa9194a 100644
--- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
+++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -79,10 +79,13 @@ func toPointer(i *Message) pointer {
// toAddrPointer converts an interface to a pointer that points to
// the interface data.
-func toAddrPointer(i *interface{}, isptr bool) pointer {
+func toAddrPointer(i *interface{}, isptr, deref bool) pointer {
v := reflect.ValueOf(*i)
u := reflect.New(v.Type())
u.Elem().Set(v)
+ if deref {
+ u = u.Elem()
+ }
return pointer{v: u}
}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
index d55a335d9..dbfffe071 100644
--- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
+++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -85,16 +85,21 @@ func toPointer(i *Message) pointer {
// toAddrPointer converts an interface to a pointer that points to
// the interface data.
-func toAddrPointer(i *interface{}, isptr bool) pointer {
+func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) {
// Super-tricky - read or get the address of data word of interface value.
if isptr {
// The interface is of pointer type, thus it is a direct interface.
// The data word is the pointer data itself. We take its address.
- return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
+ p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
+ } else {
+ // The interface is not of pointer type. The data word is the pointer
+ // to the data.
+ p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
}
- // The interface is not of pointer type. The data word is the pointer
- // to the data.
- return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+ if deref {
+ p.p = *(*unsafe.Pointer)(p.p)
+ }
+ return p
}
// valToPointer converts v to a pointer. v must be of pointer type.
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
index 50b99b83a..a4b8c0cd3 100644
--- a/vendor/github.com/golang/protobuf/proto/properties.go
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -38,7 +38,6 @@ package proto
import (
"fmt"
"log"
- "os"
"reflect"
"sort"
"strconv"
@@ -194,7 +193,7 @@ func (p *Properties) Parse(s string) {
// "bytes,49,opt,name=foo,def=hello!"
fields := strings.Split(s, ",") // breaks def=, but handled below.
if len(fields) < 2 {
- fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
+ log.Printf("proto: tag has too few fields: %q", s)
return
}
@@ -214,7 +213,7 @@ func (p *Properties) Parse(s string) {
p.WireType = WireBytes
// no numeric converter for non-numeric types
default:
- fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
+ log.Printf("proto: tag has unknown wire type: %q", s)
return
}
@@ -334,9 +333,6 @@ func GetProperties(t reflect.Type) *StructProperties {
sprop, ok := propertiesMap[t]
propertiesMu.RUnlock()
if ok {
- if collectStats {
- stats.Chit++
- }
return sprop
}
@@ -346,17 +342,20 @@ func GetProperties(t reflect.Type) *StructProperties {
return sprop
}
+type (
+ oneofFuncsIface interface {
+ XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+ }
+ oneofWrappersIface interface {
+ XXX_OneofWrappers() []interface{}
+ }
+)
+
// getPropertiesLocked requires that propertiesMu is held.
func getPropertiesLocked(t reflect.Type) *StructProperties {
if prop, ok := propertiesMap[t]; ok {
- if collectStats {
- stats.Chit++
- }
return prop
}
- if collectStats {
- stats.Cmiss++
- }
prop := new(StructProperties)
// in case of recursive protos, fill this in now.
@@ -391,13 +390,14 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
// Re-order prop.order.
sort.Sort(prop)
- type oneofMessage interface {
- XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+ var oots []interface{}
+ switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+ case oneofFuncsIface:
+ _, _, _, oots = m.XXX_OneofFuncs()
+ case oneofWrappersIface:
+ oots = m.XXX_OneofWrappers()
}
- if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
- var oots []interface{}
- _, _, _, oots = om.XXX_OneofFuncs()
-
+ if len(oots) > 0 {
// Interpret oneof metadata.
prop.OneofTypes = make(map[string]*OneofProperties)
for _, oot := range oots {
diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go
index b16794496..5cb11fa95 100644
--- a/vendor/github.com/golang/protobuf/proto/table_marshal.go
+++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go
@@ -87,6 +87,7 @@ type marshalElemInfo struct {
sizer sizer
marshaler marshaler
isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
+ deref bool // dereference the pointer before operating on it; implies isptr
}
var (
@@ -320,8 +321,11 @@ func (u *marshalInfo) computeMarshalInfo() {
// get oneof implementers
var oneofImplementers []interface{}
- if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
+ switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+ case oneofFuncsIface:
_, _, _, oneofImplementers = m.XXX_OneofFuncs()
+ case oneofWrappersIface:
+ oneofImplementers = m.XXX_OneofWrappers()
}
n := t.NumField()
@@ -407,13 +411,22 @@ func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
panic("tag is not an integer")
}
wt := wiretype(tags[0])
+ if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct {
+ t = t.Elem()
+ }
sizer, marshaler := typeMarshaler(t, tags, false, false)
+ var deref bool
+ if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+ t = reflect.PtrTo(t)
+ deref = true
+ }
e = &marshalElemInfo{
wiretag: uint64(tag)<<3 | wt,
tagsize: SizeVarint(uint64(tag) << 3),
sizer: sizer,
marshaler: marshaler,
isptr: t.Kind() == reflect.Ptr,
+ deref: deref,
}
// update cache
@@ -448,7 +461,7 @@ func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
fi.field = toField(f)
- fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire.
+ fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire.
fi.isPointer = true
fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
@@ -476,10 +489,6 @@ func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofI
}
}
-type oneofMessage interface {
- XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
-}
-
// wiretype returns the wire encoding of the type.
func wiretype(encoding string) uint64 {
switch encoding {
@@ -2310,8 +2319,8 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
for _, k := range m.MapKeys() {
ki := k.Interface()
vi := m.MapIndex(k).Interface()
- kaddr := toAddrPointer(&ki, false) // pointer to key
- vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
+ kaddr := toAddrPointer(&ki, false, false) // pointer to key
+ vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
n += siz + SizeVarint(uint64(siz)) + tagsize
}
@@ -2329,8 +2338,8 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
for _, k := range keys {
ki := k.Interface()
vi := m.MapIndex(k).Interface()
- kaddr := toAddrPointer(&ki, false) // pointer to key
- vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
+ kaddr := toAddrPointer(&ki, false, false) // pointer to key
+ vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
b = appendVarint(b, tag)
siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
b = appendVarint(b, uint64(siz))
@@ -2399,7 +2408,7 @@ func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int {
// the last time this function was called.
ei := u.getExtElemInfo(e.desc)
v := e.value
- p := toAddrPointer(&v, ei.isptr)
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
n += ei.sizer(p, ei.tagsize)
}
mu.Unlock()
@@ -2434,7 +2443,7 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de
ei := u.getExtElemInfo(e.desc)
v := e.value
- p := toAddrPointer(&v, ei.isptr)
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
if !nerr.Merge(err) {
return b, err
@@ -2465,7 +2474,7 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de
ei := u.getExtElemInfo(e.desc)
v := e.value
- p := toAddrPointer(&v, ei.isptr)
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
if !nerr.Merge(err) {
return b, err
@@ -2510,7 +2519,7 @@ func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int {
ei := u.getExtElemInfo(e.desc)
v := e.value
- p := toAddrPointer(&v, ei.isptr)
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
n += ei.sizer(p, 1) // message, tag = 3 (size=1)
}
mu.Unlock()
@@ -2553,7 +2562,7 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de
ei := u.getExtElemInfo(e.desc)
v := e.value
- p := toAddrPointer(&v, ei.isptr)
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
if !nerr.Merge(err) {
return b, err
@@ -2591,7 +2600,7 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de
ei := u.getExtElemInfo(e.desc)
v := e.value
- p := toAddrPointer(&v, ei.isptr)
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
b = append(b, 1<<3|WireEndGroup)
if !nerr.Merge(err) {
@@ -2621,7 +2630,7 @@ func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int {
ei := u.getExtElemInfo(e.desc)
v := e.value
- p := toAddrPointer(&v, ei.isptr)
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
n += ei.sizer(p, ei.tagsize)
}
return n
@@ -2656,7 +2665,7 @@ func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, determ
ei := u.getExtElemInfo(e.desc)
v := e.value
- p := toAddrPointer(&v, ei.isptr)
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
if !nerr.Merge(err) {
return b, err
diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
index ebf1caa56..acee2fc52 100644
--- a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
+++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
@@ -136,7 +136,7 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
u.computeUnmarshalInfo()
}
if u.isMessageSet {
- return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+ return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
}
var reqMask uint64 // bitmask of required fields we've seen.
var errLater error
@@ -362,46 +362,48 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
}
// Find any types associated with oneof fields.
- // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it?
- fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs")
- if fn.IsValid() {
- res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{}
- for i := res.Len() - 1; i >= 0; i-- {
- v := res.Index(i) // interface{}
- tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X
- typ := tptr.Elem() // Msg_X
-
- f := typ.Field(0) // oneof implementers have one field
- baseUnmarshal := fieldUnmarshaler(&f)
- tags := strings.Split(f.Tag.Get("protobuf"), ",")
- fieldNum, err := strconv.Atoi(tags[1])
- if err != nil {
- panic("protobuf tag field not an integer: " + tags[1])
- }
- var name string
- for _, tag := range tags {
- if strings.HasPrefix(tag, "name=") {
- name = strings.TrimPrefix(tag, "name=")
- break
- }
+ var oneofImplementers []interface{}
+ switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+ case oneofFuncsIface:
+ _, _, _, oneofImplementers = m.XXX_OneofFuncs()
+ case oneofWrappersIface:
+ oneofImplementers = m.XXX_OneofWrappers()
+ }
+ for _, v := range oneofImplementers {
+ tptr := reflect.TypeOf(v) // *Msg_X
+ typ := tptr.Elem() // Msg_X
+
+ f := typ.Field(0) // oneof implementers have one field
+ baseUnmarshal := fieldUnmarshaler(&f)
+ tags := strings.Split(f.Tag.Get("protobuf"), ",")
+ fieldNum, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("protobuf tag field not an integer: " + tags[1])
+ }
+ var name string
+ for _, tag := range tags {
+ if strings.HasPrefix(tag, "name=") {
+ name = strings.TrimPrefix(tag, "name=")
+ break
}
+ }
- // Find the oneof field that this struct implements.
- // Might take O(n^2) to process all of the oneofs, but who cares.
- for _, of := range oneofFields {
- if tptr.Implements(of.ityp) {
- // We have found the corresponding interface for this struct.
- // That lets us know where this struct should be stored
- // when we encounter it during unmarshaling.
- unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
- u.setTag(fieldNum, of.field, unmarshal, 0, name)
- }
+ // Find the oneof field that this struct implements.
+ // Might take O(n^2) to process all of the oneofs, but who cares.
+ for _, of := range oneofFields {
+ if tptr.Implements(of.ityp) {
+ // We have found the corresponding interface for this struct.
+ // That lets us know where this struct should be stored
+ // when we encounter it during unmarshaling.
+ unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
+ u.setTag(fieldNum, of.field, unmarshal, 0, name)
}
}
+
}
// Get extension ranges, if any.
- fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+ fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
if fn.IsValid() {
if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
panic("a message with extensions, but no extensions field in " + t.Name())
@@ -1948,7 +1950,7 @@ func encodeVarint(b []byte, x uint64) []byte {
// If there is an error, it returns 0,0.
func decodeVarint(b []byte) (uint64, int) {
var x, y uint64
- if len(b) <= 0 {
+ if len(b) == 0 {
goto bad
}
x = uint64(b[0])
diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go
index 1aaee725b..d97f9b356 100644
--- a/vendor/github.com/golang/protobuf/proto/text.go
+++ b/vendor/github.com/golang/protobuf/proto/text.go
@@ -456,6 +456,8 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
return nil
}
+var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
+
// writeAny writes an arbitrary field.
func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
v = reflect.Indirect(v)
@@ -519,8 +521,8 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
// mutating this value.
v = v.Addr()
}
- if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
- text, err := etm.MarshalText()
+ if v.Type().Implements(textMarshalerType) {
+ text, err := v.Interface().(encoding.TextMarshaler).MarshalText()
if err != nil {
return err
}
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
index e3c56d3ff..78ee52334 100644
--- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
@@ -1,11 +1,13 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/any.proto
-package any // import "github.com/golang/protobuf/ptypes/any"
+package any
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ math "math"
+)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@@ -16,7 +18,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// `Any` contains an arbitrary serialized protocol buffer message along with a
// URL that describes the type of the serialized message.
@@ -99,17 +101,18 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// }
//
type Any struct {
- // A URL/resource name whose content describes the type of the
- // serialized protocol buffer message.
+ // A URL/resource name that uniquely identifies the type of the serialized
+ // protocol buffer message. The last segment of the URL's path must represent
+ // the fully qualified name of the type (as in
+ // `path/google.protobuf.Duration`). The name should be in a canonical form
+ // (e.g., leading "." is not accepted).
//
- // For URLs which use the scheme `http`, `https`, or no scheme, the
- // following restrictions and interpretations apply:
+ // In practice, teams usually precompile into the binary all types that they
+ // expect it to use in the context of Any. However, for URLs which use the
+ // scheme `http`, `https`, or no scheme, one can optionally set up a type
+ // server that maps type URLs to message definitions as follows:
//
// * If no scheme is provided, `https` is assumed.
- // * The last segment of the URL's path must represent the fully
- // qualified name of the type (as in `path/google.protobuf.Duration`).
- // The name should be in a canonical form (e.g., leading "." is
- // not accepted).
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
// value in binary format, or produce an error.
// * Applications are allowed to cache lookup results based on the
@@ -118,6 +121,10 @@ type Any struct {
// on changes to types. (Use versioned type names to manage
// breaking changes.)
//
+ // Note: this functionality is not currently available in the official
+ // protobuf release, and it is not used for type URLs beginning with
+ // type.googleapis.com.
+ //
// Schemes other than `http`, `https` (or the empty scheme) might be
// used with implementation specific semantics.
//
@@ -133,17 +140,19 @@ func (m *Any) Reset() { *m = Any{} }
func (m *Any) String() string { return proto.CompactTextString(m) }
func (*Any) ProtoMessage() {}
func (*Any) Descriptor() ([]byte, []int) {
- return fileDescriptor_any_744b9ca530f228db, []int{0}
+ return fileDescriptor_b53526c13ae22eb4, []int{0}
}
+
func (*Any) XXX_WellKnownType() string { return "Any" }
+
func (m *Any) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Any.Unmarshal(m, b)
}
func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Any.Marshal(b, m, deterministic)
}
-func (dst *Any) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Any.Merge(dst, src)
+func (m *Any) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Any.Merge(m, src)
}
func (m *Any) XXX_Size() int {
return xxx_messageInfo_Any.Size(m)
@@ -172,9 +181,9 @@ func init() {
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
}
-func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_any_744b9ca530f228db) }
+func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) }
-var fileDescriptor_any_744b9ca530f228db = []byte{
+var fileDescriptor_b53526c13ae22eb4 = []byte{
// 185 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto
index c74866762..493294255 100644
--- a/vendor/github.com/golang/protobuf/ptypes/any/any.proto
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto
@@ -120,17 +120,18 @@ option objc_class_prefix = "GPB";
// }
//
message Any {
- // A URL/resource name whose content describes the type of the
- // serialized protocol buffer message.
+ // A URL/resource name that uniquely identifies the type of the serialized
+ // protocol buffer message. The last segment of the URL's path must represent
+ // the fully qualified name of the type (as in
+ // `path/google.protobuf.Duration`). The name should be in a canonical form
+ // (e.g., leading "." is not accepted).
//
- // For URLs which use the scheme `http`, `https`, or no scheme, the
- // following restrictions and interpretations apply:
+ // In practice, teams usually precompile into the binary all types that they
+ // expect it to use in the context of Any. However, for URLs which use the
+ // scheme `http`, `https`, or no scheme, one can optionally set up a type
+ // server that maps type URLs to message definitions as follows:
//
// * If no scheme is provided, `https` is assumed.
- // * The last segment of the URL's path must represent the fully
- // qualified name of the type (as in `path/google.protobuf.Duration`).
- // The name should be in a canonical form (e.g., leading "." is
- // not accepted).
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
// value in binary format, or produce an error.
// * Applications are allowed to cache lookup results based on the
@@ -139,6 +140,10 @@ message Any {
// on changes to types. (Use versioned type names to manage
// breaking changes.)
//
+ // Note: this functionality is not currently available in the official
+ // protobuf release, and it is not used for type URLs beginning with
+ // type.googleapis.com.
+ //
// Schemes other than `http`, `https` (or the empty scheme) might be
// used with implementation specific semantics.
//
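
The reworked `Any` comments above describe type URLs whose last path segment is the fully qualified message name. As an illustration only (not part of this vendor bump), the `ptypes` helpers vendored in this same tree can pack and unpack such values:

```go
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// Pack a Duration message into an Any. MarshalAny fills in the type URL
	// ("type.googleapis.com/google.protobuf.Duration") automatically.
	packed, err := ptypes.MarshalAny(&durpb.Duration{Seconds: 3})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(packed.TypeUrl) // last path segment is the fully qualified type name

	// Unpack it again; UnmarshalAny checks that the type URL matches the target type.
	var d durpb.Duration
	if err := ptypes.UnmarshalAny(packed, &d); err != nil {
		log.Fatal(err)
	}
	fmt.Println(d.Seconds) // 3
}
```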
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go
index 65cb0f8eb..26d1ca2fb 100644
--- a/vendor/github.com/golang/protobuf/ptypes/duration.go
+++ b/vendor/github.com/golang/protobuf/ptypes/duration.go
@@ -82,7 +82,7 @@ func Duration(p *durpb.Duration) (time.Duration, error) {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
}
if p.Nanos != 0 {
- d += time.Duration(p.Nanos)
+ d += time.Duration(p.Nanos) * time.Nanosecond
if (d < 0) != (p.Nanos < 0) {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
}
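
For context, a small round-trip sketch of the conversion touched above; the `* time.Nanosecond` factor is numerically a no-op (time.Duration is already a count of nanoseconds) and only makes the units explicit:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// Round-trip a Go duration through the proto representation.
	p := ptypes.DurationProto(90*time.Second + 250*time.Millisecond)

	d, err := ptypes.Duration(p)
	if err != nil {
		log.Fatal(err) // out-of-range protos return an error instead of silently overflowing
	}
	fmt.Println(d) // 1m30.25s
}
```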
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
index a7beb2c41..0d681ee21 100644
--- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
@@ -1,11 +1,13 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/duration.proto
-package duration // import "github.com/golang/protobuf/ptypes/duration"
+package duration
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ math "math"
+)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@@ -16,7 +18,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// A Duration represents a signed, fixed-length span of time represented
// as a count of seconds and fractions of seconds at nanosecond
@@ -99,17 +101,19 @@ func (m *Duration) Reset() { *m = Duration{} }
func (m *Duration) String() string { return proto.CompactTextString(m) }
func (*Duration) ProtoMessage() {}
func (*Duration) Descriptor() ([]byte, []int) {
- return fileDescriptor_duration_e7d612259e3f0613, []int{0}
+ return fileDescriptor_23597b2ebd7ac6c5, []int{0}
}
+
func (*Duration) XXX_WellKnownType() string { return "Duration" }
+
func (m *Duration) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Duration.Unmarshal(m, b)
}
func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Duration.Marshal(b, m, deterministic)
}
-func (dst *Duration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Duration.Merge(dst, src)
+func (m *Duration) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Duration.Merge(m, src)
}
func (m *Duration) XXX_Size() int {
return xxx_messageInfo_Duration.Size(m)
@@ -138,11 +142,9 @@ func init() {
proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
}
-func init() {
- proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_duration_e7d612259e3f0613)
-}
+func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) }
-var fileDescriptor_duration_e7d612259e3f0613 = []byte{
+var fileDescriptor_23597b2ebd7ac6c5 = []byte{
// 190 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
index 47f10dbc2..8da0df01a 100644
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
@@ -111,11 +111,9 @@ func TimestampNow() *tspb.Timestamp {
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
// It returns an error if the resulting Timestamp is invalid.
func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
- seconds := t.Unix()
- nanos := int32(t.Sub(time.Unix(seconds, 0)))
ts := &tspb.Timestamp{
- Seconds: seconds,
- Nanos: nanos,
+ Seconds: t.Unix(),
+ Nanos: int32(t.Nanosecond()),
}
if err := validateTimestamp(ts); err != nil {
return nil, err
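
Similarly, a short sketch of the simplified TimestampProto path; the value mirrors the `2017-01-15T01:30:15.01Z` example used in the doc comments, and the result is identical to the old computation, just derived directly from `t.Nanosecond()`:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	t := time.Date(2017, time.January, 15, 1, 30, 15, 10000000, time.UTC)

	// Nanos now comes straight from t.Nanosecond() instead of a time.Time subtraction.
	ts, err := ptypes.TimestampProto(t)
	if err != nil {
		log.Fatal(err) // only returned for timestamps outside the representable range
	}
	fmt.Println(ts.Seconds, ts.Nanos) // 1484443815 10000000

	back, err := ptypes.Timestamp(ts)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(back.Equal(t)) // true
}
```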
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
index 8e76ae976..31cd846de 100644
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
@@ -1,11 +1,13 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/timestamp.proto
-package timestamp // import "github.com/golang/protobuf/ptypes/timestamp"
+package timestamp
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ math "math"
+)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@@ -16,7 +18,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// A Timestamp represents a point in time independent of any time zone
// or calendar, represented as seconds and fractions of seconds at
@@ -81,7 +83,9 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
-// is required, though only UTC (as indicated by "Z") is presently supported.
+// is required. A proto3 JSON serializer should always use UTC (as indicated by
+// "Z") when printing the Timestamp type and a proto3 JSON parser should be
+// able to accept both UTC and other timezones (as indicated by an offset).
//
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
// 01:30 UTC on January 15, 2017.
@@ -92,8 +96,8 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
-// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--)
-// to obtain a formatter capable of generating timestamps in this format.
+// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
+// ) to obtain a formatter capable of generating timestamps in this format.
//
//
type Timestamp struct {
@@ -115,17 +119,19 @@ func (m *Timestamp) Reset() { *m = Timestamp{} }
func (m *Timestamp) String() string { return proto.CompactTextString(m) }
func (*Timestamp) ProtoMessage() {}
func (*Timestamp) Descriptor() ([]byte, []int) {
- return fileDescriptor_timestamp_b826e8e5fba671a8, []int{0}
+ return fileDescriptor_292007bbfe81227e, []int{0}
}
+
func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
+
func (m *Timestamp) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Timestamp.Unmarshal(m, b)
}
func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic)
}
-func (dst *Timestamp) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Timestamp.Merge(dst, src)
+func (m *Timestamp) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Timestamp.Merge(m, src)
}
func (m *Timestamp) XXX_Size() int {
return xxx_messageInfo_Timestamp.Size(m)
@@ -154,11 +160,9 @@ func init() {
proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
}
-func init() {
- proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_timestamp_b826e8e5fba671a8)
-}
+func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) }
-var fileDescriptor_timestamp_b826e8e5fba671a8 = []byte{
+var fileDescriptor_292007bbfe81227e = []byte{
// 191 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
index 06750ab1f..eafb3fa03 100644
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
@@ -103,7 +103,9 @@ option objc_class_prefix = "GPB";
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
-// is required, though only UTC (as indicated by "Z") is presently supported.
+// is required. A proto3 JSON serializer should always use UTC (as indicated by
+// "Z") when printing the Timestamp type and a proto3 JSON parser should be
+// able to accept both UTC and other timezones (as indicated by an offset).
//
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
// 01:30 UTC on January 15, 2017.
@@ -114,8 +116,8 @@ option objc_class_prefix = "GPB";
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
-// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--)
-// to obtain a formatter capable of generating timestamps in this format.
+// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
+// ) to obtain a formatter capable of generating timestamps in this format.
//
//
message Timestamp {
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md
index 68c8c46a8..499c58355 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md
@@ -3,6 +3,8 @@
[![Travis Build](https://travis-ci.org/grpc-ecosystem/go-grpc-prometheus.svg)](https://travis-ci.org/grpc-ecosystem/go-grpc-prometheus)
[![Go Report Card](https://goreportcard.com/badge/github.com/grpc-ecosystem/go-grpc-prometheus)](http://goreportcard.com/report/grpc-ecosystem/go-grpc-prometheus)
[![GoDoc](http://img.shields.io/badge/GoDoc-Reference-blue.svg)](https://godoc.org/github.com/grpc-ecosystem/go-grpc-prometheus)
+[![SourceGraph](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-prometheus/-/badge.svg)](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-prometheus/?badge)
+[![codecov](https://codecov.io/gh/grpc-ecosystem/go-grpc-prometheus/branch/master/graph/badge.svg)](https://codecov.io/gh/grpc-ecosystem/go-grpc-prometheus)
[![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE)
[Prometheus](https://prometheus.io/) monitoring for your [gRPC Go](https://github.com/grpc/grpc-go) servers and clients.
@@ -36,7 +38,7 @@ import "github.com/grpc-ecosystem/go-grpc-prometheus"
// After all your registrations, make sure all of the Prometheus metrics are initialized.
grpc_prometheus.Register(myServer)
// Register Prometheus metrics handler.
- http.Handle("/metrics", prometheus.Handler())
+ http.Handle("/metrics", promhttp.Handler())
...
```
@@ -47,8 +49,8 @@ import "github.com/grpc-ecosystem/go-grpc-prometheus"
...
clientConn, err = grpc.Dial(
address,
- grpc.WithUnaryInterceptor(UnaryClientInterceptor),
- grpc.WithStreamInterceptor(StreamClientInterceptor)
+ grpc.WithUnaryInterceptor(grpc_prometheus.UnaryClientInterceptor),
+ grpc.WithStreamInterceptor(grpc_prometheus.StreamClientInterceptor)
)
client = pb_testproto.NewTestServiceClient(clientConn)
resp, err := client.PingEmpty(s.ctx, &myservice.Request{Msg: "hello"})
@@ -116,7 +118,7 @@ each of the 20 messages sent back, a counter will be incremented:
grpc_server_msg_sent_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 20
```
-After the call completes, it's status (`OK` or other [gRPC status code](https://github.com/grpc/grpc-go/blob/master/codes/codes.go))
+After the call completes, its status (`OK` or other [gRPC status code](https://github.com/grpc/grpc-go/blob/master/codes/codes.go))
and the relevant call labels increment the `grpc_server_handled_total` counter.
```jsoniq
@@ -126,8 +128,8 @@ grpc_server_handled_total{grpc_code="OK",grpc_method="PingList",grpc_service="mw
## Histograms
[Prometheus histograms](https://prometheus.io/docs/concepts/metric_types/#histogram) are a great way
-to measure latency distributions of your RPCs. However since it is bad practice to have metrics
-of [high cardinality](https://prometheus.io/docs/practices/instrumentation/#do-not-overuse-labels))
+to measure latency distributions of your RPCs. However, since it is bad practice to have metrics
+of [high cardinality](https://prometheus.io/docs/practices/instrumentation/#do-not-overuse-labels)
the latency monitoring metrics are disabled by default. To enable them please call the following
in your server initialization code:
@@ -135,8 +137,8 @@ in your server initialization code:
grpc_prometheus.EnableHandlingTimeHistogram()
```
-After the call completes, it's handling time will be recorded in a [Prometheus histogram](https://prometheus.io/docs/concepts/metric_types/#histogram)
-variable `grpc_server_handling_seconds`. It contains three sub-metrics:
+After the call completes, its handling time will be recorded in a [Prometheus histogram](https://prometheus.io/docs/concepts/metric_types/#histogram)
+variable `grpc_server_handling_seconds`. The histogram variable contains three sub-metrics:
* `grpc_server_handling_seconds_count` - the count of all completed RPCs by status and method
* `grpc_server_handling_seconds_sum` - cumulative time of RPCs by status and method, useful for
@@ -166,7 +168,7 @@ grpc_server_handling_seconds_count{grpc_code="OK",grpc_method="PingList",grpc_se
## Useful query examples
-Prometheus philosophy is to provide the most detailed metrics possible to the monitoring system, and
+The Prometheus philosophy is to provide raw metrics to the monitoring system, and
let the aggregations be handled there. The verbosity of above metrics make it possible to have that
flexibility. Here's a couple of useful monitoring queries:
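
Putting the README snippets together, a minimal server wiring sketch; the ports and the commented-out service registration are placeholders, not part of the patch:

```go
package main

import (
	"net"
	"net/http"

	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"google.golang.org/grpc"
)

func main() {
	// Interceptors first, Register after all services are added, and promhttp
	// (not the removed prometheus.Handler) serving /metrics.
	server := grpc.NewServer(
		grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
		grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
	)

	// pb.RegisterMyServiceServer(server, &myService{}) // hypothetical service registration

	// Pre-initialize all per-method counters to 0 and, optionally, turn on the
	// (more expensive) handling-time histograms.
	grpc_prometheus.Register(server)
	grpc_prometheus.EnableHandlingTimeHistogram()

	http.Handle("/metrics", promhttp.Handler())
	go http.ListenAndServe(":9092", nil)

	lis, err := net.Listen("tcp", ":9090")
	if err != nil {
		panic(err)
	}
	server.Serve(lis)
}
```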
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go
index d9e87b2f7..751a4c72d 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go
@@ -6,67 +6,34 @@
package grpc_prometheus
import (
- "io"
-
- "golang.org/x/net/context"
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
+ prom "github.com/prometheus/client_golang/prometheus"
)
-// UnaryClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Unary RPCs.
-func UnaryClientInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
- monitor := newClientReporter(Unary, method)
- monitor.SentMessage()
- err := invoker(ctx, method, req, reply, cc, opts...)
- if err != nil {
- monitor.ReceivedMessage()
- }
- monitor.Handled(grpc.Code(err))
- return err
-}
+var (
+ // DefaultClientMetrics is the default instance of ClientMetrics. It is
+	// intended to be used in conjunction with the default Prometheus metrics
+ // registry.
+ DefaultClientMetrics = NewClientMetrics()
-// StreamServerInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs.
-func StreamClientInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
- monitor := newClientReporter(clientStreamType(desc), method)
- clientStream, err := streamer(ctx, desc, cc, method, opts...)
- if err != nil {
- monitor.Handled(grpc.Code(err))
- return nil, err
- }
- return &monitoredClientStream{clientStream, monitor}, nil
-}
+ // UnaryClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Unary RPCs.
+ UnaryClientInterceptor = DefaultClientMetrics.UnaryClientInterceptor()
-func clientStreamType(desc *grpc.StreamDesc) grpcType {
- if desc.ClientStreams && !desc.ServerStreams {
- return ClientStream
- } else if !desc.ClientStreams && desc.ServerStreams {
- return ServerStream
- }
- return BidiStream
-}
-
-// monitoredClientStream wraps grpc.ClientStream allowing each Sent/Recv of message to increment counters.
-type monitoredClientStream struct {
- grpc.ClientStream
- monitor *clientReporter
-}
+ // StreamClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs.
+ StreamClientInterceptor = DefaultClientMetrics.StreamClientInterceptor()
+)
-func (s *monitoredClientStream) SendMsg(m interface{}) error {
- err := s.ClientStream.SendMsg(m)
- if err == nil {
- s.monitor.SentMessage()
- }
- return err
+func init() {
+ prom.MustRegister(DefaultClientMetrics.clientStartedCounter)
+ prom.MustRegister(DefaultClientMetrics.clientHandledCounter)
+ prom.MustRegister(DefaultClientMetrics.clientStreamMsgReceived)
+ prom.MustRegister(DefaultClientMetrics.clientStreamMsgSent)
}
-func (s *monitoredClientStream) RecvMsg(m interface{}) error {
- err := s.ClientStream.RecvMsg(m)
- if err == nil {
- s.monitor.ReceivedMessage()
- } else if err == io.EOF {
- s.monitor.Handled(codes.OK)
- } else {
- s.monitor.Handled(grpc.Code(err))
- }
- return err
+// EnableClientHandlingTimeHistogram turns on recording of handling time of
+// RPCs. Histogram metrics can be very expensive for Prometheus to retain and
+// query. This function acts on the DefaultClientMetrics variable and the
+// default Prometheus metrics registry.
+func EnableClientHandlingTimeHistogram(opts ...HistogramOption) {
+ DefaultClientMetrics.EnableClientHandlingTimeHistogram(opts...)
+ prom.Register(DefaultClientMetrics.clientHandledHistogram)
}
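
And the client-side counterpart, using the package-level interceptors that now delegate to DefaultClientMetrics; the target address is a placeholder:

```go
package main

import (
	"log"

	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"google.golang.org/grpc"
)

func main() {
	// Opt in to client handling-time histograms on the default registry
	// before issuing RPCs.
	grpc_prometheus.EnableClientHandlingTimeHistogram()

	// The package-level interceptors delegate to DefaultClientMetrics, which
	// init() registers on the default Prometheus registry.
	conn, err := grpc.Dial(
		"localhost:9090", // placeholder address
		grpc.WithInsecure(),
		grpc.WithUnaryInterceptor(grpc_prometheus.UnaryClientInterceptor),
		grpc.WithStreamInterceptor(grpc_prometheus.StreamClientInterceptor),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	// ... create service clients from conn and issue RPCs ...
}
```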
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_metrics.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_metrics.go
new file mode 100644
index 000000000..9b476f983
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_metrics.go
@@ -0,0 +1,170 @@
+package grpc_prometheus
+
+import (
+ "io"
+
+ prom "github.com/prometheus/client_golang/prometheus"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+// ClientMetrics represents a collection of metrics to be registered on a
+// Prometheus metrics registry for a gRPC client.
+type ClientMetrics struct {
+ clientStartedCounter *prom.CounterVec
+ clientHandledCounter *prom.CounterVec
+ clientStreamMsgReceived *prom.CounterVec
+ clientStreamMsgSent *prom.CounterVec
+ clientHandledHistogramEnabled bool
+ clientHandledHistogramOpts prom.HistogramOpts
+ clientHandledHistogram *prom.HistogramVec
+}
+
+// NewClientMetrics returns a ClientMetrics object. Use a new instance of
+// ClientMetrics when not using the default Prometheus metrics registry, for
+// example when wanting to control which metrics are added to a registry as
+// opposed to automatically adding metrics via init functions.
+func NewClientMetrics(counterOpts ...CounterOption) *ClientMetrics {
+ opts := counterOptions(counterOpts)
+ return &ClientMetrics{
+ clientStartedCounter: prom.NewCounterVec(
+ opts.apply(prom.CounterOpts{
+ Name: "grpc_client_started_total",
+ Help: "Total number of RPCs started on the client.",
+ }), []string{"grpc_type", "grpc_service", "grpc_method"}),
+
+ clientHandledCounter: prom.NewCounterVec(
+ opts.apply(prom.CounterOpts{
+ Name: "grpc_client_handled_total",
+ Help: "Total number of RPCs completed by the client, regardless of success or failure.",
+ }), []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}),
+
+ clientStreamMsgReceived: prom.NewCounterVec(
+ opts.apply(prom.CounterOpts{
+ Name: "grpc_client_msg_received_total",
+ Help: "Total number of RPC stream messages received by the client.",
+ }), []string{"grpc_type", "grpc_service", "grpc_method"}),
+
+ clientStreamMsgSent: prom.NewCounterVec(
+ opts.apply(prom.CounterOpts{
+ Name: "grpc_client_msg_sent_total",
+ Help: "Total number of gRPC stream messages sent by the client.",
+ }), []string{"grpc_type", "grpc_service", "grpc_method"}),
+
+ clientHandledHistogramEnabled: false,
+ clientHandledHistogramOpts: prom.HistogramOpts{
+ Name: "grpc_client_handling_seconds",
+ Help: "Histogram of response latency (seconds) of the gRPC until it is finished by the application.",
+ Buckets: prom.DefBuckets,
+ },
+ clientHandledHistogram: nil,
+ }
+}
+
+// Describe sends the super-set of all possible descriptors of metrics
+// collected by this Collector to the provided channel and returns once
+// the last descriptor has been sent.
+func (m *ClientMetrics) Describe(ch chan<- *prom.Desc) {
+ m.clientStartedCounter.Describe(ch)
+ m.clientHandledCounter.Describe(ch)
+ m.clientStreamMsgReceived.Describe(ch)
+ m.clientStreamMsgSent.Describe(ch)
+ if m.clientHandledHistogramEnabled {
+ m.clientHandledHistogram.Describe(ch)
+ }
+}
+
+// Collect is called by the Prometheus registry when collecting
+// metrics. The implementation sends each collected metric via the
+// provided channel and returns once the last metric has been sent.
+func (m *ClientMetrics) Collect(ch chan<- prom.Metric) {
+ m.clientStartedCounter.Collect(ch)
+ m.clientHandledCounter.Collect(ch)
+ m.clientStreamMsgReceived.Collect(ch)
+ m.clientStreamMsgSent.Collect(ch)
+ if m.clientHandledHistogramEnabled {
+ m.clientHandledHistogram.Collect(ch)
+ }
+}
+
+// EnableClientHandlingTimeHistogram turns on recording of handling time of RPCs.
+// Histogram metrics can be very expensive for Prometheus to retain and query.
+func (m *ClientMetrics) EnableClientHandlingTimeHistogram(opts ...HistogramOption) {
+ for _, o := range opts {
+ o(&m.clientHandledHistogramOpts)
+ }
+ if !m.clientHandledHistogramEnabled {
+ m.clientHandledHistogram = prom.NewHistogramVec(
+ m.clientHandledHistogramOpts,
+ []string{"grpc_type", "grpc_service", "grpc_method"},
+ )
+ }
+ m.clientHandledHistogramEnabled = true
+}
+
+// UnaryClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Unary RPCs.
+func (m *ClientMetrics) UnaryClientInterceptor() func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+ return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+ monitor := newClientReporter(m, Unary, method)
+ monitor.SentMessage()
+ err := invoker(ctx, method, req, reply, cc, opts...)
+ if err != nil {
+ monitor.ReceivedMessage()
+ }
+ st, _ := status.FromError(err)
+ monitor.Handled(st.Code())
+ return err
+ }
+}
+
+// StreamClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs.
+func (m *ClientMetrics) StreamClientInterceptor() func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+ return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+ monitor := newClientReporter(m, clientStreamType(desc), method)
+ clientStream, err := streamer(ctx, desc, cc, method, opts...)
+ if err != nil {
+ st, _ := status.FromError(err)
+ monitor.Handled(st.Code())
+ return nil, err
+ }
+ return &monitoredClientStream{clientStream, monitor}, nil
+ }
+}
+
+func clientStreamType(desc *grpc.StreamDesc) grpcType {
+ if desc.ClientStreams && !desc.ServerStreams {
+ return ClientStream
+ } else if !desc.ClientStreams && desc.ServerStreams {
+ return ServerStream
+ }
+ return BidiStream
+}
+
+// monitoredClientStream wraps grpc.ClientStream allowing each Sent/Recv of message to increment counters.
+type monitoredClientStream struct {
+ grpc.ClientStream
+ monitor *clientReporter
+}
+
+func (s *monitoredClientStream) SendMsg(m interface{}) error {
+ err := s.ClientStream.SendMsg(m)
+ if err == nil {
+ s.monitor.SentMessage()
+ }
+ return err
+}
+
+func (s *monitoredClientStream) RecvMsg(m interface{}) error {
+ err := s.ClientStream.RecvMsg(m)
+ if err == nil {
+ s.monitor.ReceivedMessage()
+ } else if err == io.EOF {
+ s.monitor.Handled(codes.OK)
+ } else {
+ st, _ := status.FromError(err)
+ s.monitor.Handled(st.Code())
+ }
+ return err
+}
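
Because ClientMetrics implements Describe and Collect, it satisfies prometheus.Collector and can be attached to a private registry instead of the global one. A hedged sketch, with placeholder addresses and ports:

```go
package main

import (
	"log"
	"net/http"

	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	prom "github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"google.golang.org/grpc"
)

func main() {
	// Enable the histogram before registering so the registry sees its descriptor.
	reg := prom.NewRegistry()
	clientMetrics := grpc_prometheus.NewClientMetrics()
	clientMetrics.EnableClientHandlingTimeHistogram()
	reg.MustRegister(clientMetrics)

	conn, err := grpc.Dial(
		"localhost:9090", // placeholder address
		grpc.WithInsecure(),
		grpc.WithUnaryInterceptor(clientMetrics.UnaryClientInterceptor()),
		grpc.WithStreamInterceptor(clientMetrics.StreamClientInterceptor()),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Expose only the metrics held by this registry.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":9092", nil))
}
```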
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go
index 16b761553..cbf153229 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go
@@ -7,105 +7,40 @@ import (
"time"
"google.golang.org/grpc/codes"
-
- prom "github.com/prometheus/client_golang/prometheus"
-)
-
-var (
- clientStartedCounter = prom.NewCounterVec(
- prom.CounterOpts{
- Namespace: "grpc",
- Subsystem: "client",
- Name: "started_total",
- Help: "Total number of RPCs started on the client.",
- }, []string{"grpc_type", "grpc_service", "grpc_method"})
-
- clientHandledCounter = prom.NewCounterVec(
- prom.CounterOpts{
- Namespace: "grpc",
- Subsystem: "client",
- Name: "handled_total",
- Help: "Total number of RPCs completed by the client, regardless of success or failure.",
- }, []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"})
-
- clientStreamMsgReceived = prom.NewCounterVec(
- prom.CounterOpts{
- Namespace: "grpc",
- Subsystem: "client",
- Name: "msg_received_total",
- Help: "Total number of RPC stream messages received by the client.",
- }, []string{"grpc_type", "grpc_service", "grpc_method"})
-
- clientStreamMsgSent = prom.NewCounterVec(
- prom.CounterOpts{
- Namespace: "grpc",
- Subsystem: "client",
- Name: "msg_sent_total",
- Help: "Total number of gRPC stream messages sent by the client.",
- }, []string{"grpc_type", "grpc_service", "grpc_method"})
-
- clientHandledHistogramEnabled = false
- clientHandledHistogramOpts = prom.HistogramOpts{
- Namespace: "grpc",
- Subsystem: "client",
- Name: "handling_seconds",
- Help: "Histogram of response latency (seconds) of the gRPC until it is finished by the application.",
- Buckets: prom.DefBuckets,
- }
- clientHandledHistogram *prom.HistogramVec
)
-func init() {
- prom.MustRegister(clientStartedCounter)
- prom.MustRegister(clientHandledCounter)
- prom.MustRegister(clientStreamMsgReceived)
- prom.MustRegister(clientStreamMsgSent)
-}
-
-// EnableClientHandlingTimeHistogram turns on recording of handling time of RPCs.
-// Histogram metrics can be very expensive for Prometheus to retain and query.
-func EnableClientHandlingTimeHistogram(opts ...HistogramOption) {
- for _, o := range opts {
- o(&clientHandledHistogramOpts)
- }
- if !clientHandledHistogramEnabled {
- clientHandledHistogram = prom.NewHistogramVec(
- clientHandledHistogramOpts,
- []string{"grpc_type", "grpc_service", "grpc_method"},
- )
- prom.Register(clientHandledHistogram)
- }
- clientHandledHistogramEnabled = true
-}
-
type clientReporter struct {
+ metrics *ClientMetrics
rpcType grpcType
serviceName string
methodName string
startTime time.Time
}
-func newClientReporter(rpcType grpcType, fullMethod string) *clientReporter {
- r := &clientReporter{rpcType: rpcType}
- if clientHandledHistogramEnabled {
+func newClientReporter(m *ClientMetrics, rpcType grpcType, fullMethod string) *clientReporter {
+ r := &clientReporter{
+ metrics: m,
+ rpcType: rpcType,
+ }
+ if r.metrics.clientHandledHistogramEnabled {
r.startTime = time.Now()
}
r.serviceName, r.methodName = splitMethodName(fullMethod)
- clientStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+ r.metrics.clientStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
return r
}
func (r *clientReporter) ReceivedMessage() {
- clientStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+ r.metrics.clientStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
}
func (r *clientReporter) SentMessage() {
- clientStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+ r.metrics.clientStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
}
func (r *clientReporter) Handled(code codes.Code) {
- clientHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc()
- if clientHandledHistogramEnabled {
- clientHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds())
+ r.metrics.clientHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc()
+ if r.metrics.clientHandledHistogramEnabled {
+ r.metrics.clientHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds())
}
}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/metric_options.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/metric_options.go
new file mode 100644
index 000000000..9d51aec98
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/metric_options.go
@@ -0,0 +1,41 @@
+package grpc_prometheus
+
+import (
+ prom "github.com/prometheus/client_golang/prometheus"
+)
+
+// A CounterOption lets you add options to Counter metrics using With* funcs.
+type CounterOption func(*prom.CounterOpts)
+
+type counterOptions []CounterOption
+
+func (co counterOptions) apply(o prom.CounterOpts) prom.CounterOpts {
+ for _, f := range co {
+ f(&o)
+ }
+ return o
+}
+
+// WithConstLabels allows you to add ConstLabels to Counter metrics.
+func WithConstLabels(labels prom.Labels) CounterOption {
+ return func(o *prom.CounterOpts) {
+ o.ConstLabels = labels
+ }
+}
+
+// A HistogramOption lets you add options to Histogram metrics using With*
+// funcs.
+type HistogramOption func(*prom.HistogramOpts)
+
+// WithHistogramBuckets allows you to specify custom bucket ranges for histograms if EnableHandlingTimeHistogram is on.
+func WithHistogramBuckets(buckets []float64) HistogramOption {
+ return func(o *prom.HistogramOpts) { o.Buckets = buckets }
+}
+
+// WithHistogramConstLabels allows you to add custom ConstLabels to
+// histograms metrics.
+func WithHistogramConstLabels(labels prom.Labels) HistogramOption {
+ return func(o *prom.HistogramOpts) {
+ o.ConstLabels = labels
+ }
+}
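
A brief sketch of the new option types; the "payments" label value and the bucket boundaries are arbitrary placeholders:

```go
package main

import (
	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	prom "github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Attach constant labels to all counters and custom buckets to the
	// handling-time histogram of a ServerMetrics instance.
	serverMetrics := grpc_prometheus.NewServerMetrics(
		grpc_prometheus.WithConstLabels(prom.Labels{"service": "payments"}),
	)
	serverMetrics.EnableHandlingTimeHistogram(
		grpc_prometheus.WithHistogramBuckets([]float64{0.005, 0.05, 0.5, 5}),
		grpc_prometheus.WithHistogramConstLabels(prom.Labels{"service": "payments"}),
	)

	// Histogram enabled before registration, so its descriptor is known to the registry.
	prom.MustRegister(serverMetrics)
}
```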
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go
index f85c8c237..322f99046 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go
@@ -6,69 +6,43 @@
package grpc_prometheus
import (
- "golang.org/x/net/context"
+ prom "github.com/prometheus/client_golang/prometheus"
"google.golang.org/grpc"
)
-// PreregisterServices takes a gRPC server and pre-initializes all counters to 0.
-// This allows for easier monitoring in Prometheus (no missing metrics), and should be called *after* all services have
-// been registered with the server.
-func Register(server *grpc.Server) {
- serviceInfo := server.GetServiceInfo()
- for serviceName, info := range serviceInfo {
- for _, mInfo := range info.Methods {
- preRegisterMethod(serviceName, &mInfo)
- }
- }
-}
-
-// UnaryServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Unary RPCs.
-func UnaryServerInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
- monitor := newServerReporter(Unary, info.FullMethod)
- monitor.ReceivedMessage()
- resp, err := handler(ctx, req)
- monitor.Handled(grpc.Code(err))
- if err == nil {
- monitor.SentMessage()
- }
- return resp, err
-}
+var (
+ // DefaultServerMetrics is the default instance of ServerMetrics. It is
+	// intended to be used in conjunction with the default Prometheus metrics
+ // registry.
+ DefaultServerMetrics = NewServerMetrics()
-// StreamServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Streaming RPCs.
-func StreamServerInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
- monitor := newServerReporter(streamRpcType(info), info.FullMethod)
- err := handler(srv, &monitoredServerStream{ss, monitor})
- monitor.Handled(grpc.Code(err))
- return err
-}
+ // UnaryServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Unary RPCs.
+ UnaryServerInterceptor = DefaultServerMetrics.UnaryServerInterceptor()
-func streamRpcType(info *grpc.StreamServerInfo) grpcType {
- if info.IsClientStream && !info.IsServerStream {
- return ClientStream
- } else if !info.IsClientStream && info.IsServerStream {
- return ServerStream
- }
- return BidiStream
-}
+ // StreamServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Streaming RPCs.
+ StreamServerInterceptor = DefaultServerMetrics.StreamServerInterceptor()
+)
-// monitoredStream wraps grpc.ServerStream allowing each Sent/Recv of message to increment counters.
-type monitoredServerStream struct {
- grpc.ServerStream
- monitor *serverReporter
+func init() {
+ prom.MustRegister(DefaultServerMetrics.serverStartedCounter)
+ prom.MustRegister(DefaultServerMetrics.serverHandledCounter)
+ prom.MustRegister(DefaultServerMetrics.serverStreamMsgReceived)
+ prom.MustRegister(DefaultServerMetrics.serverStreamMsgSent)
}
-func (s *monitoredServerStream) SendMsg(m interface{}) error {
- err := s.ServerStream.SendMsg(m)
- if err == nil {
- s.monitor.SentMessage()
- }
- return err
+// Register takes a gRPC server and pre-initializes all counters to 0. This
+// allows for easier monitoring in Prometheus (no missing metrics), and should
+// be called *after* all services have been registered with the server. This
+// function acts on the DefaultServerMetrics variable.
+func Register(server *grpc.Server) {
+ DefaultServerMetrics.InitializeMetrics(server)
}
-func (s *monitoredServerStream) RecvMsg(m interface{}) error {
- err := s.ServerStream.RecvMsg(m)
- if err == nil {
- s.monitor.ReceivedMessage()
- }
- return err
+// EnableHandlingTimeHistogram turns on recording of handling time
+// of RPCs. Histogram metrics can be very expensive for Prometheus
+// to retain and query. This function acts on the DefaultServerMetrics
+// variable and the default Prometheus metrics registry.
+func EnableHandlingTimeHistogram(opts ...HistogramOption) {
+ DefaultServerMetrics.EnableHandlingTimeHistogram(opts...)
+ prom.Register(DefaultServerMetrics.serverHandledHistogram)
}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_metrics.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_metrics.go
new file mode 100644
index 000000000..5b1467e7a
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_metrics.go
@@ -0,0 +1,185 @@
+package grpc_prometheus
+
+import (
+ prom "github.com/prometheus/client_golang/prometheus"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/status"
+)
+
+// ServerMetrics represents a collection of metrics to be registered on a
+// Prometheus metrics registry for a gRPC server.
+type ServerMetrics struct {
+ serverStartedCounter *prom.CounterVec
+ serverHandledCounter *prom.CounterVec
+ serverStreamMsgReceived *prom.CounterVec
+ serverStreamMsgSent *prom.CounterVec
+ serverHandledHistogramEnabled bool
+ serverHandledHistogramOpts prom.HistogramOpts
+ serverHandledHistogram *prom.HistogramVec
+}
+
+// NewServerMetrics returns a ServerMetrics object. Use a new instance of
+// ServerMetrics when not using the default Prometheus metrics registry, for
+// example when wanting to control which metrics are added to a registry as
+// opposed to automatically adding metrics via init functions.
+func NewServerMetrics(counterOpts ...CounterOption) *ServerMetrics {
+ opts := counterOptions(counterOpts)
+ return &ServerMetrics{
+ serverStartedCounter: prom.NewCounterVec(
+ opts.apply(prom.CounterOpts{
+ Name: "grpc_server_started_total",
+ Help: "Total number of RPCs started on the server.",
+ }), []string{"grpc_type", "grpc_service", "grpc_method"}),
+ serverHandledCounter: prom.NewCounterVec(
+ opts.apply(prom.CounterOpts{
+ Name: "grpc_server_handled_total",
+ Help: "Total number of RPCs completed on the server, regardless of success or failure.",
+ }), []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}),
+ serverStreamMsgReceived: prom.NewCounterVec(
+ opts.apply(prom.CounterOpts{
+ Name: "grpc_server_msg_received_total",
+ Help: "Total number of RPC stream messages received on the server.",
+ }), []string{"grpc_type", "grpc_service", "grpc_method"}),
+ serverStreamMsgSent: prom.NewCounterVec(
+ opts.apply(prom.CounterOpts{
+ Name: "grpc_server_msg_sent_total",
+ Help: "Total number of gRPC stream messages sent by the server.",
+ }), []string{"grpc_type", "grpc_service", "grpc_method"}),
+ serverHandledHistogramEnabled: false,
+ serverHandledHistogramOpts: prom.HistogramOpts{
+ Name: "grpc_server_handling_seconds",
+ Help: "Histogram of response latency (seconds) of gRPC that had been application-level handled by the server.",
+ Buckets: prom.DefBuckets,
+ },
+ serverHandledHistogram: nil,
+ }
+}
+
+// EnableHandlingTimeHistogram enables histograms being registered when
+// registering the ServerMetrics on a Prometheus registry. Histograms can be
+// expensive on Prometheus servers. It takes options to configure histogram
+// options such as the defined buckets.
+func (m *ServerMetrics) EnableHandlingTimeHistogram(opts ...HistogramOption) {
+ for _, o := range opts {
+ o(&m.serverHandledHistogramOpts)
+ }
+ if !m.serverHandledHistogramEnabled {
+ m.serverHandledHistogram = prom.NewHistogramVec(
+ m.serverHandledHistogramOpts,
+ []string{"grpc_type", "grpc_service", "grpc_method"},
+ )
+ }
+ m.serverHandledHistogramEnabled = true
+}
+
+// Describe sends the super-set of all possible descriptors of metrics
+// collected by this Collector to the provided channel and returns once
+// the last descriptor has been sent.
+func (m *ServerMetrics) Describe(ch chan<- *prom.Desc) {
+ m.serverStartedCounter.Describe(ch)
+ m.serverHandledCounter.Describe(ch)
+ m.serverStreamMsgReceived.Describe(ch)
+ m.serverStreamMsgSent.Describe(ch)
+ if m.serverHandledHistogramEnabled {
+ m.serverHandledHistogram.Describe(ch)
+ }
+}
+
+// Collect is called by the Prometheus registry when collecting
+// metrics. The implementation sends each collected metric via the
+// provided channel and returns once the last metric has been sent.
+func (m *ServerMetrics) Collect(ch chan<- prom.Metric) {
+ m.serverStartedCounter.Collect(ch)
+ m.serverHandledCounter.Collect(ch)
+ m.serverStreamMsgReceived.Collect(ch)
+ m.serverStreamMsgSent.Collect(ch)
+ if m.serverHandledHistogramEnabled {
+ m.serverHandledHistogram.Collect(ch)
+ }
+}
+
+// UnaryServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Unary RPCs.
+func (m *ServerMetrics) UnaryServerInterceptor() func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+ return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+ monitor := newServerReporter(m, Unary, info.FullMethod)
+ monitor.ReceivedMessage()
+ resp, err := handler(ctx, req)
+ st, _ := status.FromError(err)
+ monitor.Handled(st.Code())
+ if err == nil {
+ monitor.SentMessage()
+ }
+ return resp, err
+ }
+}
+
+// StreamServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Streaming RPCs.
+func (m *ServerMetrics) StreamServerInterceptor() func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+ return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+ monitor := newServerReporter(m, streamRPCType(info), info.FullMethod)
+ err := handler(srv, &monitoredServerStream{ss, monitor})
+ st, _ := status.FromError(err)
+ monitor.Handled(st.Code())
+ return err
+ }
+}
+
+// InitializeMetrics initializes all metrics, with their appropriate null
+// value, for all gRPC methods registered on a gRPC server. This is useful to
+// ensure that all metrics exist when collecting and querying.
+func (m *ServerMetrics) InitializeMetrics(server *grpc.Server) {
+ serviceInfo := server.GetServiceInfo()
+ for serviceName, info := range serviceInfo {
+ for _, mInfo := range info.Methods {
+ preRegisterMethod(m, serviceName, &mInfo)
+ }
+ }
+}
+
+func streamRPCType(info *grpc.StreamServerInfo) grpcType {
+ if info.IsClientStream && !info.IsServerStream {
+ return ClientStream
+ } else if !info.IsClientStream && info.IsServerStream {
+ return ServerStream
+ }
+ return BidiStream
+}
+
+// monitoredStream wraps grpc.ServerStream allowing each Sent/Recv of message to increment counters.
+type monitoredServerStream struct {
+ grpc.ServerStream
+ monitor *serverReporter
+}
+
+func (s *monitoredServerStream) SendMsg(m interface{}) error {
+ err := s.ServerStream.SendMsg(m)
+ if err == nil {
+ s.monitor.SentMessage()
+ }
+ return err
+}
+
+func (s *monitoredServerStream) RecvMsg(m interface{}) error {
+ err := s.ServerStream.RecvMsg(m)
+ if err == nil {
+ s.monitor.ReceivedMessage()
+ }
+ return err
+}
+
+// preRegisterMethod is invoked on Register of a Server, allowing all gRPC services labels to be pre-populated.
+func preRegisterMethod(metrics *ServerMetrics, serviceName string, mInfo *grpc.MethodInfo) {
+ methodName := mInfo.Name
+ methodType := string(typeFromMethodInfo(mInfo))
+ // These are just references (no increments), as just referencing will create the labels but not set values.
+ metrics.serverStartedCounter.GetMetricWithLabelValues(methodType, serviceName, methodName)
+ metrics.serverStreamMsgReceived.GetMetricWithLabelValues(methodType, serviceName, methodName)
+ metrics.serverStreamMsgSent.GetMetricWithLabelValues(methodType, serviceName, methodName)
+ if metrics.serverHandledHistogramEnabled {
+ metrics.serverHandledHistogram.GetMetricWithLabelValues(methodType, serviceName, methodName)
+ }
+ for _, code := range allCodes {
+ metrics.serverHandledCounter.GetMetricWithLabelValues(methodType, serviceName, methodName, code.String())
+ }
+}
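
The server-side equivalent of the custom-registry pattern, using InitializeMetrics so every per-method series exists before the first RPC; the service registration and ports are placeholders:

```go
package main

import (
	"net"
	"net/http"

	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	prom "github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"google.golang.org/grpc"
)

func main() {
	// Wire a ServerMetrics instance to a private registry instead of relying
	// on the DefaultServerMetrics/init() path in server.go.
	reg := prom.NewRegistry()
	serverMetrics := grpc_prometheus.NewServerMetrics()
	reg.MustRegister(serverMetrics)

	server := grpc.NewServer(
		grpc.UnaryInterceptor(serverMetrics.UnaryServerInterceptor()),
		grpc.StreamInterceptor(serverMetrics.StreamServerInterceptor()),
	)
	// pb.RegisterMyServiceServer(server, &myService{}) // hypothetical service registration

	// Pre-create every per-method label combination so dashboards never see
	// missing series.
	serverMetrics.InitializeMetrics(server)

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	go http.ListenAndServe(":9092", nil)

	lis, err := net.Listen("tcp", ":9090")
	if err != nil {
		panic(err)
	}
	server.Serve(lis)
}
```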
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go
index 628a89056..aa9db5401 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go
@@ -7,151 +7,40 @@ import (
"time"
"google.golang.org/grpc/codes"
-
- prom "github.com/prometheus/client_golang/prometheus"
- "google.golang.org/grpc"
-)
-
-type grpcType string
-
-const (
- Unary grpcType = "unary"
- ClientStream grpcType = "client_stream"
- ServerStream grpcType = "server_stream"
- BidiStream grpcType = "bidi_stream"
)
-var (
- serverStartedCounter = prom.NewCounterVec(
- prom.CounterOpts{
- Namespace: "grpc",
- Subsystem: "server",
- Name: "started_total",
- Help: "Total number of RPCs started on the server.",
- }, []string{"grpc_type", "grpc_service", "grpc_method"})
-
- serverHandledCounter = prom.NewCounterVec(
- prom.CounterOpts{
- Namespace: "grpc",
- Subsystem: "server",
- Name: "handled_total",
- Help: "Total number of RPCs completed on the server, regardless of success or failure.",
- }, []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"})
-
- serverStreamMsgReceived = prom.NewCounterVec(
- prom.CounterOpts{
- Namespace: "grpc",
- Subsystem: "server",
- Name: "msg_received_total",
- Help: "Total number of RPC stream messages received on the server.",
- }, []string{"grpc_type", "grpc_service", "grpc_method"})
-
- serverStreamMsgSent = prom.NewCounterVec(
- prom.CounterOpts{
- Namespace: "grpc",
- Subsystem: "server",
- Name: "msg_sent_total",
- Help: "Total number of gRPC stream messages sent by the server.",
- }, []string{"grpc_type", "grpc_service", "grpc_method"})
-
- serverHandledHistogramEnabled = false
- serverHandledHistogramOpts = prom.HistogramOpts{
- Namespace: "grpc",
- Subsystem: "server",
- Name: "handling_seconds",
- Help: "Histogram of response latency (seconds) of gRPC that had been application-level handled by the server.",
- Buckets: prom.DefBuckets,
- }
- serverHandledHistogram *prom.HistogramVec
-)
-
-func init() {
- prom.MustRegister(serverStartedCounter)
- prom.MustRegister(serverHandledCounter)
- prom.MustRegister(serverStreamMsgReceived)
- prom.MustRegister(serverStreamMsgSent)
-}
-
-type HistogramOption func(*prom.HistogramOpts)
-
-// WithHistogramBuckets allows you to specify custom bucket ranges for histograms if EnableHandlingTimeHistogram is on.
-func WithHistogramBuckets(buckets []float64) HistogramOption {
- return func(o *prom.HistogramOpts) { o.Buckets = buckets }
-}
-
-// EnableHandlingTimeHistogram turns on recording of handling time of RPCs for server-side interceptors.
-// Histogram metrics can be very expensive for Prometheus to retain and query.
-func EnableHandlingTimeHistogram(opts ...HistogramOption) {
- for _, o := range opts {
- o(&serverHandledHistogramOpts)
- }
- if !serverHandledHistogramEnabled {
- serverHandledHistogram = prom.NewHistogramVec(
- serverHandledHistogramOpts,
- []string{"grpc_type", "grpc_service", "grpc_method"},
- )
- prom.Register(serverHandledHistogram)
- }
- serverHandledHistogramEnabled = true
-}
-
type serverReporter struct {
+ metrics *ServerMetrics
rpcType grpcType
serviceName string
methodName string
startTime time.Time
}
-func newServerReporter(rpcType grpcType, fullMethod string) *serverReporter {
- r := &serverReporter{rpcType: rpcType}
- if serverHandledHistogramEnabled {
+func newServerReporter(m *ServerMetrics, rpcType grpcType, fullMethod string) *serverReporter {
+ r := &serverReporter{
+ metrics: m,
+ rpcType: rpcType,
+ }
+ if r.metrics.serverHandledHistogramEnabled {
r.startTime = time.Now()
}
r.serviceName, r.methodName = splitMethodName(fullMethod)
- serverStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+ r.metrics.serverStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
return r
}
func (r *serverReporter) ReceivedMessage() {
- serverStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+ r.metrics.serverStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
}
func (r *serverReporter) SentMessage() {
- serverStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+ r.metrics.serverStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
}
func (r *serverReporter) Handled(code codes.Code) {
- serverHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc()
- if serverHandledHistogramEnabled {
- serverHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds())
- }
-}
-
-// preRegisterMethod is invoked on Register of a Server, allowing all gRPC services labels to be pre-populated.
-func preRegisterMethod(serviceName string, mInfo *grpc.MethodInfo) {
- methodName := mInfo.Name
- methodType := string(typeFromMethodInfo(mInfo))
- // These are just references (no increments), as just referencing will create the labels but not set values.
- serverStartedCounter.GetMetricWithLabelValues(methodType, serviceName, methodName)
- serverStreamMsgReceived.GetMetricWithLabelValues(methodType, serviceName, methodName)
- serverStreamMsgSent.GetMetricWithLabelValues(methodType, serviceName, methodName)
- if serverHandledHistogramEnabled {
- serverHandledHistogram.GetMetricWithLabelValues(methodType, serviceName, methodName)
- }
- for _, code := range allCodes {
- serverHandledCounter.GetMetricWithLabelValues(methodType, serviceName, methodName, code.String())
- }
-}
-
-func typeFromMethodInfo(mInfo *grpc.MethodInfo) grpcType {
- if mInfo.IsClientStream == false && mInfo.IsServerStream == false {
- return Unary
- }
- if mInfo.IsClientStream == true && mInfo.IsServerStream == false {
- return ClientStream
- }
- if mInfo.IsClientStream == false && mInfo.IsServerStream == true {
- return ServerStream
+ r.metrics.serverHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc()
+ if r.metrics.serverHandledHistogramEnabled {
+ r.metrics.serverHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds())
}
- return BidiStream
}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go
index 372460ac4..7987de35f 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go
@@ -6,9 +6,19 @@ package grpc_prometheus
import (
"strings"
+ "google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
+type grpcType string
+
+const (
+ Unary grpcType = "unary"
+ ClientStream grpcType = "client_stream"
+ ServerStream grpcType = "server_stream"
+ BidiStream grpcType = "bidi_stream"
+)
+
var (
allCodes = []codes.Code{
codes.OK, codes.Canceled, codes.Unknown, codes.InvalidArgument, codes.DeadlineExceeded, codes.NotFound,
@@ -25,3 +35,16 @@ func splitMethodName(fullMethodName string) (string, string) {
}
return "unknown", "unknown"
}
+
+func typeFromMethodInfo(mInfo *grpc.MethodInfo) grpcType {
+ if !mInfo.IsClientStream && !mInfo.IsServerStream {
+ return Unary
+ }
+ if mInfo.IsClientStream && !mInfo.IsServerStream {
+ return ClientStream
+ }
+ if !mInfo.IsClientStream && mInfo.IsServerStream {
+ return ServerStream
+ }
+ return BidiStream
+}
diff --git a/vendor/github.com/prometheus/client_golang/README.md b/vendor/github.com/prometheus/client_golang/README.md
index 0eb0df1df..9bab32a21 100644
--- a/vendor/github.com/prometheus/client_golang/README.md
+++ b/vendor/github.com/prometheus/client_golang/README.md
@@ -2,13 +2,32 @@
[![Build Status](https://travis-ci.org/prometheus/client_golang.svg?branch=master)](https://travis-ci.org/prometheus/client_golang)
[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/client_golang)](https://goreportcard.com/report/github.com/prometheus/client_golang)
+[![go-doc](https://godoc.org/github.com/prometheus/client_golang?status.svg)](https://godoc.org/github.com/prometheus/client_golang)
This is the [Go](http://golang.org) client library for
[Prometheus](http://prometheus.io). It has two separate parts, one for
instrumenting application code, and one for creating clients that talk to the
Prometheus HTTP API.
-__This library requires Go1.7 or later.__
+__This library requires Go1.9 or later.__ The minimum required patch releases for older Go versions are Go1.9.7 and Go1.10.3.
+
+## Important note about releases and stability
+
+This repository generally follows [Semantic
+Versioning](https://semver.org/). However, the API client in
+prometheus/client_golang/api/… is still considered experimental. Breaking
+changes of the API client will _not_ trigger a new major release. The same is
+true for selected other new features explicitly marked as **EXPERIMENTAL** in
+CHANGELOG.md.
+
+Features that require breaking changes in the stable parts of the repository
+are being batched up and tracked in the [v2
+milestone](https://github.com/prometheus/client_golang/milestone/2). The v2
+development happens in a [separate
+branch](https://github.com/prometheus/client_golang/tree/dev-v2) for the time
+being. v2 releases off that branch will happen once sufficient stability is
+reached. In view of the widespread use of this repository, v1 and v2 will
+coexist for a while to enable a convenient transition.
## Instrumenting applications
@@ -17,8 +36,8 @@ __This library requires Go1.7 or later.__
The
[`prometheus` directory](https://github.com/prometheus/client_golang/tree/master/prometheus)
contains the instrumentation library. See the
-[best practices section](http://prometheus.io/docs/practices/naming/) of the
-Prometheus documentation to learn more about instrumenting applications.
+[guide](https://prometheus.io/docs/guides/go-application/) on the Prometheus
+website to learn more about instrumenting applications.
The
[`examples` directory](https://github.com/prometheus/client_golang/tree/master/examples)
@@ -26,7 +45,7 @@ contains simple examples of instrumented code.
## Client for the Prometheus HTTP API
-[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/api/prometheus)](http://gocover.io/github.com/prometheus/client_golang/api/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/api/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/api/prometheus)
+[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/api/prometheus/v1)](http://gocover.io/github.com/prometheus/client_golang/api/prometheus/v1) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/api/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/api)
The
[`api/prometheus` directory](https://github.com/prometheus/client_golang/tree/master/api/prometheus)
diff --git a/vendor/github.com/prometheus/client_golang/go.mod b/vendor/github.com/prometheus/client_golang/go.mod
new file mode 100644
index 000000000..1540dba22
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/go.mod
@@ -0,0 +1,14 @@
+module github.com/prometheus/client_golang
+
+require (
+ github.com/beorn7/perks v1.0.1
+ github.com/cespare/xxhash/v2 v2.1.1
+ github.com/golang/protobuf v1.3.2
+ github.com/json-iterator/go v1.1.8
+ github.com/prometheus/client_model v0.1.0
+ github.com/prometheus/common v0.7.0
+ github.com/prometheus/procfs v0.0.8
+ golang.org/x/sys v0.0.0-20191220142924-d4481acd189f
+)
+
+go 1.11
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info.go
new file mode 100644
index 000000000..288f0e854
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/build_info.go
@@ -0,0 +1,29 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.12
+
+package prometheus
+
+import "runtime/debug"
+
+// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go 1.12+.
+func readBuildInfo() (path, version, sum string) {
+ path, version, sum = "unknown", "unknown", "unknown"
+ if bi, ok := debug.ReadBuildInfo(); ok {
+ path = bi.Main.Path
+ version = bi.Main.Version
+ sum = bi.Main.Sum
+ }
+ return
+}
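
The go1.12 wrapper above is a thin shim over runtime/debug. A standalone sketch of the underlying call it relies on (the output depends on how the binary was built):

package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	// Only meaningful when the binary was built with Go module support.
	if bi, ok := debug.ReadBuildInfo(); ok {
		fmt.Println(bi.Main.Path, bi.Main.Version, bi.Main.Sum)
		return
	}
	fmt.Println("binary built without module support")
}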
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go
new file mode 100644
index 000000000..6609e2877
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go
@@ -0,0 +1,22 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !go1.12
+
+package prometheus
+
+// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go versions before
+// 1.12. Remove this whole file once the minimum supported Go version is 1.12.
+func readBuildInfo() (path, version, sum string) {
+ return "unknown", "unknown", "unknown"
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
index 623d3d83f..1e839650d 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
@@ -29,27 +29,72 @@ type Collector interface {
// collected by this Collector to the provided channel and returns once
// the last descriptor has been sent. The sent descriptors fulfill the
// consistency and uniqueness requirements described in the Desc
- // documentation. (It is valid if one and the same Collector sends
- // duplicate descriptors. Those duplicates are simply ignored. However,
- // two different Collectors must not send duplicate descriptors.) This
- // method idempotently sends the same descriptors throughout the
- // lifetime of the Collector. If a Collector encounters an error while
- // executing this method, it must send an invalid descriptor (created
- // with NewInvalidDesc) to signal the error to the registry.
+ // documentation.
+ //
+ // It is valid if one and the same Collector sends duplicate
+ // descriptors. Those duplicates are simply ignored. However, two
+ // different Collectors must not send duplicate descriptors.
+ //
+ // Sending no descriptor at all marks the Collector as “unchecked”,
+ // i.e. no checks will be performed at registration time, and the
+ // Collector may yield any Metric it sees fit in its Collect method.
+ //
+ // This method idempotently sends the same descriptors throughout the
+ // lifetime of the Collector. It may be called concurrently and
+ // therefore must be implemented in a concurrency safe way.
+ //
+ // If a Collector encounters an error while executing this method, it
+ // must send an invalid descriptor (created with NewInvalidDesc) to
+ // signal the error to the registry.
Describe(chan<- *Desc)
// Collect is called by the Prometheus registry when collecting
// metrics. The implementation sends each collected metric via the
// provided channel and returns once the last metric has been sent. The
- // descriptor of each sent metric is one of those returned by
- // Describe. Returned metrics that share the same descriptor must differ
- // in their variable label values. This method may be called
- // concurrently and must therefore be implemented in a concurrency safe
- // way. Blocking occurs at the expense of total performance of rendering
- // all registered metrics. Ideally, Collector implementations support
- // concurrent readers.
+ // descriptor of each sent metric is one of those returned by Describe
+ // (unless the Collector is unchecked, see above). Returned metrics that
+ // share the same descriptor must differ in their variable label
+ // values.
+ //
+ // This method may be called concurrently and must therefore be
+ // implemented in a concurrency safe way. Blocking occurs at the expense
+ // of total performance of rendering all registered metrics. Ideally,
+ // Collector implementations support concurrent readers.
Collect(chan<- Metric)
}
+// DescribeByCollect is a helper to implement the Describe method of a custom
+// Collector. It collects the metrics from the provided Collector and sends
+// their descriptors to the provided channel.
+//
+// If a Collector collects the same metrics throughout its lifetime, its
+// Describe method can simply be implemented as:
+//
+// func (c customCollector) Describe(ch chan<- *Desc) {
+// DescribeByCollect(c, ch)
+// }
+//
+// However, this will not work if the metrics collected change dynamically over
+// the lifetime of the Collector in a way that their combined set of descriptors
+// changes as well. The shortcut implementation will then violate the contract
+// of the Describe method. If a Collector sometimes collects no metrics at all
+// (for example vectors like CounterVec, GaugeVec, etc., which only collect
+// metrics after a metric with a fully specified label set has been accessed),
+// it might even get registered as an unchecked Collector (cf. the Register
+// method of the Registerer interface). Hence, only use this shortcut
+// implementation of Describe if you are certain to fulfill the contract.
+//
+// The Collector example demonstrates a use of DescribeByCollect.
+func DescribeByCollect(c Collector, descs chan<- *Desc) {
+ metrics := make(chan Metric)
+ go func() {
+ c.Collect(metrics)
+ close(metrics)
+ }()
+ for m := range metrics {
+ descs <- m.Desc()
+ }
+}
+
// selfCollector implements Collector for a single Metric so that the Metric
// collects itself. Add it as an anonymous field to a struct that implements
// Metric, and call init with the Metric itself as an argument.
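
As a usage note on the new DescribeByCollect helper, here is a minimal custom Collector that satisfies the stated contract by always exporting the same metric. The queueCollector type, the queueDepth function, and the metric name are made up for illustration.

package main

import "github.com/prometheus/client_golang/prometheus"

type queueCollector struct {
	desc *prometheus.Desc
}

// Describe delegates to DescribeByCollect, which is safe here because the
// collector always emits exactly one metric with a fixed descriptor.
func (c queueCollector) Describe(ch chan<- *prometheus.Desc) {
	prometheus.DescribeByCollect(c, ch)
}

func (c queueCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, queueDepth())
}

func queueDepth() float64 { return 42 } // stand-in for real instrumentation

func main() {
	prometheus.MustRegister(queueCollector{
		desc: prometheus.NewDesc("example_queue_depth", "Current queue depth.", nil, nil),
	})
}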
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
index 765e4550c..d463e36d3 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
@@ -136,7 +136,7 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
return &CounterVec{
metricVec: newMetricVec(desc, func(lvs ...string) Metric {
if len(lvs) != len(desc.variableLabels) {
- panic(errInconsistentCardinality)
+ panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
}
result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}
result.init(result) // Init self-collection.
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
index 4a755b0fa..e3232d79f 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -19,6 +19,7 @@ import (
"sort"
"strings"
+ "github.com/cespare/xxhash/v2"
"github.com/golang/protobuf/proto"
"github.com/prometheus/common/model"
@@ -67,7 +68,7 @@ type Desc struct {
// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
// and will be reported on registration time. variableLabels and constLabels can
-// be nil if no such labels should be set. fqName and help must not be empty.
+// be nil if no such labels should be set. fqName must not be empty.
//
// variableLabels only contain the label names. Their label values are variable
// and therefore not part of the Desc. (They are managed within the Metric.)
@@ -80,10 +81,6 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
help: help,
variableLabels: variableLabels,
}
- if help == "" {
- d.err = errors.New("empty help string")
- return d
- }
if !model.IsValidMetricName(model.LabelValue(fqName)) {
d.err = fmt.Errorf("%q is not a valid metric name", fqName)
return d
@@ -97,7 +94,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
// First add only the const label names and sort them...
for labelName := range constLabels {
if !checkLabelName(labelName) {
- d.err = fmt.Errorf("%q is not a valid label name", labelName)
+ d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
return d
}
labelNames = append(labelNames, labelName)
@@ -119,7 +116,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
// dimension with a different mix between preset and variable labels.
for _, labelName := range variableLabels {
if !checkLabelName(labelName) {
- d.err = fmt.Errorf("%q is not a valid label name", labelName)
+ d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
return d
}
labelNames = append(labelNames, "$"+labelName)
@@ -130,24 +127,24 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
return d
}
- vh := hashNew()
+ xxh := xxhash.New()
for _, val := range labelValues {
- vh = hashAdd(vh, val)
- vh = hashAddByte(vh, separatorByte)
+ xxh.WriteString(val)
+ xxh.Write(separatorByteSlice)
}
- d.id = vh
+ d.id = xxh.Sum64()
// Sort labelNames so that order doesn't matter for the hash.
sort.Strings(labelNames)
// Now hash together (in this order) the help string and the sorted
// label names.
- lh := hashNew()
- lh = hashAdd(lh, help)
- lh = hashAddByte(lh, separatorByte)
+ xxh.Reset()
+ xxh.WriteString(help)
+ xxh.Write(separatorByteSlice)
for _, labelName := range labelNames {
- lh = hashAdd(lh, labelName)
- lh = hashAddByte(lh, separatorByte)
+ xxh.WriteString(labelName)
+ xxh.Write(separatorByteSlice)
}
- d.dimHash = lh
+ d.dimHash = xxh.Sum64()
d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
for n, v := range constLabels {
@@ -156,7 +153,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
Value: proto.String(v),
})
}
- sort.Sort(LabelPairSorter(d.constLabelPairs))
+ sort.Sort(labelPairSorter(d.constLabelPairs))
return d
}
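
The switch from the inline FNV helpers to xxhash above boils down to the following hashing pattern. The label values are invented; the separator byte mirrors model.SeparatorByte (255).

package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	sep := []byte{0xff} // model.SeparatorByte
	h := xxhash.New()
	for _, v := range []string{"get", "200"} { // example label values
		h.WriteString(v)
		h.Write(sep)
	}
	fmt.Printf("id: %x\n", h.Sum64())
}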
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
index 36ef15567..01977de66 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
@@ -11,10 +11,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// Package prometheus provides metrics primitives to instrument code for
-// monitoring. It also offers a registry for metrics. Sub-packages allow to
-// expose the registered metrics via HTTP (package promhttp) or push them to a
-// Pushgateway (package push).
+// Package prometheus is the core instrumentation package. It provides metrics
+// primitives to instrument code for monitoring. It also offers a registry for
+// metrics. Sub-packages allow to expose the registered metrics via HTTP
+// (package promhttp) or push them to a Pushgateway (package push). There is
+// also a sub-package promauto, which provides metrics constructors with
+// automatic registration.
//
// All exported functions and methods are safe to be used concurrently unless
// specified otherwise.
@@ -72,7 +74,10 @@
// The number of exported identifiers in this package might appear a bit
// overwhelming. However, in addition to the basic plumbing shown in the example
// above, you only need to understand the different metric types and their
-// vector versions for basic usage.
+// vector versions for basic usage. Furthermore, if you are not concerned with
+// fine-grained control of when and how to register metrics with the registry,
+// have a look at the promauto package, which will effectively allow you to
+// ignore registration altogether in simple cases.
//
// Above, you have already touched the Counter and the Gauge. There are two more
// advanced metric types: the Summary and Histogram. A more thorough description
@@ -116,7 +121,17 @@
// NewConstSummary (and their respective Must… versions). That will happen in
// the Collect method. The Describe method has to return separate Desc
// instances, representative of the “throw-away” metrics to be created later.
-// NewDesc comes in handy to create those Desc instances.
+// NewDesc comes in handy to create those Desc instances. Alternatively, you
+// could return no Desc at all, which will mark the Collector “unchecked”. No
+// checks are performed at registration time, but metric consistency will still
+// be ensured at scrape time, i.e. any inconsistencies will lead to scrape
+// errors. Thus, with unchecked Collectors, the responsibility to not collect
+// metrics that lead to inconsistencies in the total scrape result lies with the
+// implementer of the Collector. While this is not a desirable state, it is
+// sometimes necessary. The typical use case is a situation where the exact
+// metrics to be returned by a Collector cannot be predicted at registration
+// time, but the implementer has sufficient knowledge of the whole system to
+// guarantee metric consistency.
//
// The Collector example illustrates the use case. You can also look at the
// source code of the processCollector (mirroring process metrics), the
@@ -168,7 +183,6 @@
// method can then expose the gathered metrics in some way. Usually, the metrics
// are served via HTTP on the /metrics endpoint. That's happening in the example
// above. The tools to expose metrics via HTTP are in the promhttp sub-package.
-// (The top-level functions in the prometheus package are deprecated.)
//
// Pushing to the Pushgateway
//
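
The promauto package referenced in the updated package docs lets simple programs skip explicit registration entirely. A minimal sketch; the metric name is an example.

package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// promauto.NewCounter registers with the default registry at construction time.
var opsProcessed = promauto.NewCounter(prometheus.CounterOpts{
	Name: "example_processed_ops_total",
	Help: "The total number of processed events.",
})

func main() {
	opsProcessed.Inc()
}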
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
index e3b67df8a..3d383a735 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
@@ -1,3 +1,16 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package prometheus
// Inline and byte-free variant of hash/fnv's fnv64a.
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
index 17c72d7eb..56d8cc209 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
@@ -147,7 +147,7 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
return &GaugeVec{
metricVec: newMetricVec(desc, func(lvs ...string) Metric {
if len(lvs) != len(desc.variableLabels) {
- panic(errInconsistentCardinality)
+ panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
}
result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}
result.init(result) // Init self-collection.
@@ -273,9 +273,12 @@ type GaugeFunc interface {
// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
// value reported is determined by calling the given function from within the
// Write method. Take into account that metric collection may happen
-// concurrently. If that results in concurrent calls to Write, like in the case
-// where a GaugeFunc is directly registered with Prometheus, the provided
-// function must be concurrency-safe.
+// concurrently. Therefore, it must be safe to call the provided function
+// concurrently.
+//
+// NewGaugeFunc is a good way to create an “info” style metric with a constant
+// value of 1. Example:
+// https://github.com/prometheus/common/blob/8558a5b7db3c84fa38b4766966059a7bd5bfa2ee/version/info.go#L36-L56
func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
return newValueFunc(NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
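
The “info” style metric mentioned in the NewGaugeFunc comment can be sketched as follows; the version and revision values are placeholders.

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	prometheus.MustRegister(prometheus.NewGaugeFunc(
		prometheus.GaugeOpts{
			Name:        "example_build_info",
			Help:        "Build information.",
			ConstLabels: prometheus.Labels{"version": "1.2.3", "revision": "abc123"},
		},
		func() float64 { return 1 }, // constant value; the labels carry the information
	))
}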
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
index 096454af9..dc9247fed 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -1,9 +1,22 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package prometheus
import (
- "fmt"
"runtime"
"runtime/debug"
+ "sync"
"time"
)
@@ -13,12 +26,41 @@ type goCollector struct {
gcDesc *Desc
goInfoDesc *Desc
- // metrics to describe and collect
- metrics memStatsMetrics
+ // ms... are memstats related.
+ msLast *runtime.MemStats // Previously collected memstats.
+ msLastTimestamp time.Time
+ msMtx sync.Mutex // Protects msLast and msLastTimestamp.
+ msMetrics memStatsMetrics
+ msRead func(*runtime.MemStats) // For mocking in tests.
+ msMaxWait time.Duration // Wait time for fresh memstats.
+ msMaxAge time.Duration // Maximum allowed age of old memstats.
}
-// NewGoCollector returns a collector which exports metrics about the current
-// go process.
+// NewGoCollector returns a collector that exports metrics about the current Go
+// process. This includes memory stats. To collect those, runtime.ReadMemStats
+// is called. This requires to “stop the world”, which usually only happens for
+// garbage collection (GC). Take the following implications into account when
+// deciding whether to use the Go collector:
+//
+// 1. The performance impact of stopping the world becomes more relevant the
+// more frequently metrics are collected. However, with Go1.9 or later the

+// stop-the-world time per metrics collection is very short (~25µs) so that the
+// performance impact will only matter in rare cases. However, with older Go
+// versions, the stop-the-world duration depends on the heap size and can be
+// quite significant (~1.7 ms/GiB as per
+// https://go-review.googlesource.com/c/go/+/34937).
+//
+// 2. During an ongoing GC, nothing else can stop the world. Therefore, if the
+// metrics collection happens to coincide with GC, it will only complete after
+// GC has finished. Usually, GC is fast enough to not cause problems. However,
+// with a very large heap, GC might take multiple seconds, which is enough to
+// cause scrape timeouts in common setups. To avoid this problem, the Go
+// collector will use the memstats from a previous collection if
+// runtime.ReadMemStats takes more than 1s. However, if there are no previously
+// collected memstats, or their collection is more than 5m ago, the collection
+// will block until runtime.ReadMemStats succeeds. (The problem might be solved
+// in Go1.13, see https://github.com/golang/go/issues/19812 for the related Go
+// issue.)
func NewGoCollector() Collector {
return &goCollector{
goroutinesDesc: NewDesc(
@@ -37,7 +79,11 @@ func NewGoCollector() Collector {
"go_info",
"Information about the Go environment.",
nil, Labels{"version": runtime.Version()}),
- metrics: memStatsMetrics{
+ msLast: &runtime.MemStats{},
+ msRead: runtime.ReadMemStats,
+ msMaxWait: time.Second,
+ msMaxAge: 5 * time.Minute,
+ msMetrics: memStatsMetrics{
{
desc: NewDesc(
memstatNamespace("alloc_bytes"),
@@ -236,7 +282,7 @@ func NewGoCollector() Collector {
}
func memstatNamespace(s string) string {
- return fmt.Sprintf("go_memstats_%s", s)
+ return "go_memstats_" + s
}
// Describe returns all descriptions of the collector.
@@ -245,13 +291,27 @@ func (c *goCollector) Describe(ch chan<- *Desc) {
ch <- c.threadsDesc
ch <- c.gcDesc
ch <- c.goInfoDesc
- for _, i := range c.metrics {
+ for _, i := range c.msMetrics {
ch <- i.desc
}
}
// Collect returns the current state of all metrics of the collector.
func (c *goCollector) Collect(ch chan<- Metric) {
+ var (
+ ms = &runtime.MemStats{}
+ done = make(chan struct{})
+ )
+ // Start reading memstats first as it might take a while.
+ go func() {
+ c.msRead(ms)
+ c.msMtx.Lock()
+ c.msLast = ms
+ c.msLastTimestamp = time.Now()
+ c.msMtx.Unlock()
+ close(done)
+ }()
+
ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
n, _ := runtime.ThreadCreateProfile(nil)
ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
@@ -265,13 +325,35 @@ func (c *goCollector) Collect(ch chan<- Metric) {
quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
}
quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
- ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles)
+ ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
- ms := &runtime.MemStats{}
- runtime.ReadMemStats(ms)
- for _, i := range c.metrics {
+ timer := time.NewTimer(c.msMaxWait)
+ select {
+ case <-done: // Our own ReadMemStats succeeded in time. Use it.
+ timer.Stop() // Important for high collection frequencies to not pile up timers.
+ c.msCollect(ch, ms)
+ return
+ case <-timer.C: // Time out, use last memstats if possible. Continue below.
+ }
+ c.msMtx.Lock()
+ if time.Since(c.msLastTimestamp) < c.msMaxAge {
+ // Last memstats are recent enough. Collect from them under the lock.
+ c.msCollect(ch, c.msLast)
+ c.msMtx.Unlock()
+ return
+ }
+ // If we are here, the last memstats are too old or don't exist. We have
+ // to wait until our own ReadMemStats finally completes. For that to
+ // happen, we have to release the lock.
+ c.msMtx.Unlock()
+ <-done
+ c.msCollect(ch, ms)
+}
+
+func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
+ for _, i := range c.msMetrics {
ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
}
}
@@ -282,3 +364,33 @@ type memStatsMetrics []struct {
eval func(*runtime.MemStats) float64
valType ValueType
}
+
+// NewBuildInfoCollector returns a collector collecting a single metric
+// "go_build_info" with the constant value 1 and three labels "path", "version",
+// and "checksum". Their label values contain the main module path, version, and
+// checksum, respectively. The labels will only have meaningful values if the
+// binary is built with Go module support and from source code retrieved from
+// the source repository (rather than the local file system). This is usually
+// accomplished by building from outside of GOPATH, specifying the full address
+// of the main package, e.g. "GO111MODULE=on go run
+// github.com/prometheus/client_golang/examples/random". If built without Go
+// module support, all label values will be "unknown". If built with Go module
+// support but using the source code from the local file system, the "path" will
+// be set appropriately, but "checksum" will be empty and "version" will be
+// "(devel)".
+//
+// This collector uses only the build information for the main module. See
+// https://github.com/povilasv/prommod for an example of a collector for the
+// module dependencies.
+func NewBuildInfoCollector() Collector {
+ path, version, sum := readBuildInfo()
+ c := &selfCollector{MustNewConstMetric(
+ NewDesc(
+ "go_build_info",
+ "Build information about the main Go module.",
+ nil, Labels{"path": path, "version": version, "checksum": sum},
+ ),
+ GaugeValue, 1)}
+ c.init(c.self)
+ return c
+}
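
A hedged sketch of wiring the Go collector and the new build-info collector into a dedicated registry (a fresh registry avoids clashing with the Go collector already present in the default one); the listen address is arbitrary.

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(
		prometheus.NewGoCollector(),
		prometheus.NewBuildInfoCollector(),
	)
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}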
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
index 331783a75..ac2614d52 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -16,7 +16,9 @@ package prometheus
import (
"fmt"
"math"
+ "runtime"
"sort"
+ "sync"
"sync/atomic"
"github.com/golang/protobuf/proto"
@@ -108,8 +110,9 @@ func ExponentialBuckets(start, factor float64, count int) []float64 {
}
// HistogramOpts bundles the options for creating a Histogram metric. It is
-// mandatory to set Name and Help to a non-empty string. All other fields are
-// optional and can safely be left at their zero value.
+// mandatory to set Name to a non-empty string. All other fields are optional
+// and can safely be left at their zero value, although it is strongly
+// encouraged to set a Help string.
type HistogramOpts struct {
// Namespace, Subsystem, and Name are components of the fully-qualified
// name of the Histogram (created by joining these components with
@@ -120,7 +123,7 @@ type HistogramOpts struct {
Subsystem string
Name string
- // Help provides information about this Histogram. Mandatory!
+ // Help provides information about this Histogram.
//
// Metrics with the same fully-qualified name must have the same Help
// string.
@@ -135,7 +138,7 @@ type HistogramOpts struct {
// better covered by target labels set by the scraping Prometheus
// server, or by one specific metric (e.g. a build_info or a
// machine_role metric). See also
- // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
+ // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
ConstLabels Labels
// Buckets defines the buckets into which observations are counted. Each
@@ -162,7 +165,7 @@ func NewHistogram(opts HistogramOpts) Histogram {
func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
if len(desc.variableLabels) != len(labelValues) {
- panic(errInconsistentCardinality)
+ panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
}
for _, n := range desc.variableLabels {
@@ -184,6 +187,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
desc: desc,
upperBounds: opts.Buckets,
labelPairs: makeLabelPairs(desc, labelValues),
+ counts: [2]*histogramCounts{{}, {}},
}
for i, upperBound := range h.upperBounds {
if i < len(h.upperBounds)-1 {
@@ -200,30 +204,56 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
}
}
}
- // Finally we know the final length of h.upperBounds and can make counts.
- h.counts = make([]uint64, len(h.upperBounds))
+ // Finally we know the final length of h.upperBounds and can make buckets
+ // for both counts:
+ h.counts[0].buckets = make([]uint64, len(h.upperBounds))
+ h.counts[1].buckets = make([]uint64, len(h.upperBounds))
h.init(h) // Init self-collection.
return h
}
-type histogram struct {
+type histogramCounts struct {
// sumBits contains the bits of the float64 representing the sum of all
// observations. sumBits and count have to go first in the struct to
// guarantee alignment for atomic operations.
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
sumBits uint64
count uint64
+ buckets []uint64
+}
+
+type histogram struct {
+ // countAndHotIdx enables lock-free writes with use of atomic updates.
+ // The most significant bit is the hot index [0 or 1] of the count field
+ // below. Observe calls update the hot one. All remaining bits count the
+ // number of Observe calls. Observe starts by incrementing this counter,
+ // and finish by incrementing the count field in the respective
+ // histogramCounts, as a marker for completion.
+ //
+ // Calls of the Write method (which are non-mutating reads from the
+ // perspective of the histogram) swap the hot–cold under the writeMtx
+ // lock. A cooldown is awaited (while locked) by comparing the number of
+ // observations with the initiation count. Once they match, then the
+ // last observation on the now cool one has completed. All cool fields must
+ // be merged into the new hot before releasing writeMtx.
+ //
+ // Fields with atomic access first! See alignment constraint:
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ countAndHotIdx uint64
selfCollector
- // Note that there is no mutex required.
+ desc *Desc
+ writeMtx sync.Mutex // Only used in the Write method.
- desc *Desc
+ // Two counts, one is "hot" for lock-free observations, the other is
+ // "cold" for writing out a dto.Metric. It has to be an array of
+ // pointers to guarantee 64bit alignment of the histogramCounts, see
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
+ counts [2]*histogramCounts
upperBounds []float64
- counts []uint64
-
- labelPairs []*dto.LabelPair
+ labelPairs []*dto.LabelPair
}
func (h *histogram) Desc() *Desc {
@@ -241,36 +271,84 @@ func (h *histogram) Observe(v float64) {
// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
i := sort.SearchFloat64s(h.upperBounds, v)
- if i < len(h.counts) {
- atomic.AddUint64(&h.counts[i], 1)
+
+ // We increment h.countAndHotIdx so that the counter in the lower
+ // 63 bits gets incremented. At the same time, we get the new value
+ // back, which we can use to find the currently-hot counts.
+ n := atomic.AddUint64(&h.countAndHotIdx, 1)
+ hotCounts := h.counts[n>>63]
+
+ if i < len(h.upperBounds) {
+ atomic.AddUint64(&hotCounts.buckets[i], 1)
}
- atomic.AddUint64(&h.count, 1)
for {
- oldBits := atomic.LoadUint64(&h.sumBits)
+ oldBits := atomic.LoadUint64(&hotCounts.sumBits)
newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
- if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) {
+ if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
break
}
}
+ // Increment count last as we take it as a signal that the observation
+ // is complete.
+ atomic.AddUint64(&hotCounts.count, 1)
}
func (h *histogram) Write(out *dto.Metric) error {
- his := &dto.Histogram{}
- buckets := make([]*dto.Bucket, len(h.upperBounds))
+ // For simplicity, we protect this whole method by a mutex. It is not in
+ // the hot path, i.e. Observe is called much more often than Write. The
+ // complication of making Write lock-free isn't worth it, if possible at
+ // all.
+ h.writeMtx.Lock()
+ defer h.writeMtx.Unlock()
+
+ // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
+ // without touching the count bits. See the struct comments for a full
+ // description of the algorithm.
+ n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
+ // count is contained unchanged in the lower 63 bits.
+ count := n & ((1 << 63) - 1)
+ // The most significant bit tells us which counts is hot. The complement
+ // is thus the cold one.
+ hotCounts := h.counts[n>>63]
+ coldCounts := h.counts[(^n)>>63]
+
+ // Await cooldown.
+ for count != atomic.LoadUint64(&coldCounts.count) {
+ runtime.Gosched() // Let observations get work done.
+ }
- his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits)))
- his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count))
- var count uint64
+ his := &dto.Histogram{
+ Bucket: make([]*dto.Bucket, len(h.upperBounds)),
+ SampleCount: proto.Uint64(count),
+ SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+ }
+ var cumCount uint64
for i, upperBound := range h.upperBounds {
- count += atomic.LoadUint64(&h.counts[i])
- buckets[i] = &dto.Bucket{
- CumulativeCount: proto.Uint64(count),
+ cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
+ his.Bucket[i] = &dto.Bucket{
+ CumulativeCount: proto.Uint64(cumCount),
UpperBound: proto.Float64(upperBound),
}
}
- his.Bucket = buckets
+
out.Histogram = his
out.Label = h.labelPairs
+
+ // Finally add all the cold counts to the new hot counts and reset the cold counts.
+ atomic.AddUint64(&hotCounts.count, count)
+ atomic.StoreUint64(&coldCounts.count, 0)
+ for {
+ oldBits := atomic.LoadUint64(&hotCounts.sumBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum())
+ if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
+ atomic.StoreUint64(&coldCounts.sumBits, 0)
+ break
+ }
+ }
+ for i := range h.upperBounds {
+ atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i]))
+ atomic.StoreUint64(&coldCounts.buckets[i], 0)
+ }
return nil
}
@@ -454,7 +532,7 @@ func (h *constHistogram) Write(out *dto.Metric) error {
// bucket.
//
// NewConstHistogram returns an error if the length of labelValues is not
-// consistent with the variable labels in Desc.
+// consistent with the variable labels in Desc or if Desc is invalid.
func NewConstHistogram(
desc *Desc,
count uint64,
@@ -462,6 +540,9 @@ func NewConstHistogram(
buckets map[float64]uint64,
labelValues ...string,
) (Metric, error) {
+ if desc.err != nil {
+ return nil, desc.err
+ }
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
return nil, err
}
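
The countAndHotIdx bookkeeping above packs two things into one atomic word. A standalone sketch of the bit layout (values are illustrative):

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var countAndHotIdx uint64

	// Observe: increment the observation count and learn the current hot index.
	n := atomic.AddUint64(&countAndHotIdx, 1)
	fmt.Println("hot index:", n>>63, "observations:", n&((1<<63)-1))

	// Write: flip the hot index without disturbing the count bits.
	n = atomic.AddUint64(&countAndHotIdx, 1<<63)
	fmt.Println("hot index:", n>>63, "observations:", n&((1<<63)-1))
}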
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go
deleted file mode 100644
index bfee5c6eb..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/http.go
+++ /dev/null
@@ -1,524 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "bufio"
- "bytes"
- "compress/gzip"
- "fmt"
- "io"
- "net"
- "net/http"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "github.com/prometheus/common/expfmt"
-)
-
-// TODO(beorn7): Remove this whole file. It is a partial mirror of
-// promhttp/http.go (to avoid circular import chains) where everything HTTP
-// related should live. The functions here are just for avoiding
-// breakage. Everything is deprecated.
-
-const (
- contentTypeHeader = "Content-Type"
- contentLengthHeader = "Content-Length"
- contentEncodingHeader = "Content-Encoding"
- acceptEncodingHeader = "Accept-Encoding"
-)
-
-var bufPool sync.Pool
-
-func getBuf() *bytes.Buffer {
- buf := bufPool.Get()
- if buf == nil {
- return &bytes.Buffer{}
- }
- return buf.(*bytes.Buffer)
-}
-
-func giveBuf(buf *bytes.Buffer) {
- buf.Reset()
- bufPool.Put(buf)
-}
-
-// Handler returns an HTTP handler for the DefaultGatherer. It is
-// already instrumented with InstrumentHandler (using "prometheus" as handler
-// name).
-//
-// Deprecated: Please note the issues described in the doc comment of
-// InstrumentHandler. You might want to consider using promhttp.Handler instead
-// (which is not instrumented, but can be instrumented with the tooling provided
-// in package promhttp).
-func Handler() http.Handler {
- return InstrumentHandler("prometheus", UninstrumentedHandler())
-}
-
-// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
-//
-// Deprecated: Use promhttp.Handler instead. See there for further documentation.
-func UninstrumentedHandler() http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
- mfs, err := DefaultGatherer.Gather()
- if err != nil {
- http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError)
- return
- }
-
- contentType := expfmt.Negotiate(req.Header)
- buf := getBuf()
- defer giveBuf(buf)
- writer, encoding := decorateWriter(req, buf)
- enc := expfmt.NewEncoder(writer, contentType)
- var lastErr error
- for _, mf := range mfs {
- if err := enc.Encode(mf); err != nil {
- lastErr = err
- http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
- return
- }
- }
- if closer, ok := writer.(io.Closer); ok {
- closer.Close()
- }
- if lastErr != nil && buf.Len() == 0 {
- http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError)
- return
- }
- header := w.Header()
- header.Set(contentTypeHeader, string(contentType))
- header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
- if encoding != "" {
- header.Set(contentEncodingHeader, encoding)
- }
- w.Write(buf.Bytes())
- })
-}
-
-// decorateWriter wraps a writer to handle gzip compression if requested. It
-// returns the decorated writer and the appropriate "Content-Encoding" header
-// (which is empty if no compression is enabled).
-func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) {
- header := request.Header.Get(acceptEncodingHeader)
- parts := strings.Split(header, ",")
- for _, part := range parts {
- part := strings.TrimSpace(part)
- if part == "gzip" || strings.HasPrefix(part, "gzip;") {
- return gzip.NewWriter(writer), "gzip"
- }
- }
- return writer, ""
-}
-
-var instLabels = []string{"method", "code"}
-
-type nower interface {
- Now() time.Time
-}
-
-type nowFunc func() time.Time
-
-func (n nowFunc) Now() time.Time {
- return n()
-}
-
-var now nower = nowFunc(func() time.Time {
- return time.Now()
-})
-
-func nowSeries(t ...time.Time) nower {
- return nowFunc(func() time.Time {
- defer func() {
- t = t[1:]
- }()
-
- return t[0]
- })
-}
-
-// InstrumentHandler wraps the given HTTP handler for instrumentation. It
-// registers four metric collectors (if not already done) and reports HTTP
-// metrics to the (newly or already) registered collectors: http_requests_total
-// (CounterVec), http_request_duration_microseconds (Summary),
-// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each
-// has a constant label named "handler" with the provided handlerName as
-// value. http_requests_total is a metric vector partitioned by HTTP method
-// (label name "method") and HTTP status code (label name "code").
-//
-// Deprecated: InstrumentHandler has several issues. Use the tooling provided in
-// package promhttp instead. The issues are the following:
-//
-// - It uses Summaries rather than Histograms. Summaries are not useful if
-// aggregation across multiple instances is required.
-//
-// - It uses microseconds as unit, which is deprecated and should be replaced by
-// seconds.
-//
-// - The size of the request is calculated in a separate goroutine. Since this
-// calculator requires access to the request header, it creates a race with
-// any writes to the header performed during request handling.
-// httputil.ReverseProxy is a prominent example for a handler
-// performing such writes.
-//
-// - It has additional issues with HTTP/2, cf.
-// https://github.com/prometheus/client_golang/issues/272.
-func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
- return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
-}
-
-// InstrumentHandlerFunc wraps the given function for instrumentation. It
-// otherwise works in the same way as InstrumentHandler (and shares the same
-// issues).
-//
-// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as
-// InstrumentHandler is. Use the tooling provided in package promhttp instead.
-func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
- return InstrumentHandlerFuncWithOpts(
- SummaryOpts{
- Subsystem: "http",
- ConstLabels: Labels{"handler": handlerName},
- Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
- },
- handlerFunc,
- )
-}
-
-// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same
-// issues) but provides more flexibility (at the cost of a more complex call
-// syntax). As InstrumentHandler, this function registers four metric
-// collectors, but it uses the provided SummaryOpts to create them. However, the
-// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced
-// by "requests_total", "request_duration_microseconds", "request_size_bytes",
-// and "response_size_bytes", respectively. "Help" is replaced by an appropriate
-// help string. The names of the variable labels of the http_requests_total
-// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code).
-//
-// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the
-// behavior of InstrumentHandler:
-//
-// prometheus.InstrumentHandlerWithOpts(
-// prometheus.SummaryOpts{
-// Subsystem: "http",
-// ConstLabels: prometheus.Labels{"handler": handlerName},
-// },
-// handler,
-// )
-//
-// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it
-// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
-// and all its fields are set to the equally named fields in the provided
-// SummaryOpts.
-//
-// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as
-// InstrumentHandler is. Use the tooling provided in package promhttp instead.
-func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
- return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
-}
-
-// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares
-// the same issues) but provides more flexibility (at the cost of a more complex
-// call syntax). See InstrumentHandlerWithOpts for details how the provided
-// SummaryOpts are used.
-//
-// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons
-// as InstrumentHandler is. Use the tooling provided in package promhttp instead.
-func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
- reqCnt := NewCounterVec(
- CounterOpts{
- Namespace: opts.Namespace,
- Subsystem: opts.Subsystem,
- Name: "requests_total",
- Help: "Total number of HTTP requests made.",
- ConstLabels: opts.ConstLabels,
- },
- instLabels,
- )
- if err := Register(reqCnt); err != nil {
- if are, ok := err.(AlreadyRegisteredError); ok {
- reqCnt = are.ExistingCollector.(*CounterVec)
- } else {
- panic(err)
- }
- }
-
- opts.Name = "request_duration_microseconds"
- opts.Help = "The HTTP request latencies in microseconds."
- reqDur := NewSummary(opts)
- if err := Register(reqDur); err != nil {
- if are, ok := err.(AlreadyRegisteredError); ok {
- reqDur = are.ExistingCollector.(Summary)
- } else {
- panic(err)
- }
- }
-
- opts.Name = "request_size_bytes"
- opts.Help = "The HTTP request sizes in bytes."
- reqSz := NewSummary(opts)
- if err := Register(reqSz); err != nil {
- if are, ok := err.(AlreadyRegisteredError); ok {
- reqSz = are.ExistingCollector.(Summary)
- } else {
- panic(err)
- }
- }
-
- opts.Name = "response_size_bytes"
- opts.Help = "The HTTP response sizes in bytes."
- resSz := NewSummary(opts)
- if err := Register(resSz); err != nil {
- if are, ok := err.(AlreadyRegisteredError); ok {
- resSz = are.ExistingCollector.(Summary)
- } else {
- panic(err)
- }
- }
-
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- now := time.Now()
-
- delegate := &responseWriterDelegator{ResponseWriter: w}
- out := computeApproximateRequestSize(r)
-
- _, cn := w.(http.CloseNotifier)
- _, fl := w.(http.Flusher)
- _, hj := w.(http.Hijacker)
- _, rf := w.(io.ReaderFrom)
- var rw http.ResponseWriter
- if cn && fl && hj && rf {
- rw = &fancyResponseWriterDelegator{delegate}
- } else {
- rw = delegate
- }
- handlerFunc(rw, r)
-
- elapsed := float64(time.Since(now)) / float64(time.Microsecond)
-
- method := sanitizeMethod(r.Method)
- code := sanitizeCode(delegate.status)
- reqCnt.WithLabelValues(method, code).Inc()
- reqDur.Observe(elapsed)
- resSz.Observe(float64(delegate.written))
- reqSz.Observe(float64(<-out))
- })
-}
-
-func computeApproximateRequestSize(r *http.Request) <-chan int {
- // Get URL length in current go routine for avoiding a race condition.
- // HandlerFunc that runs in parallel may modify the URL.
- s := 0
- if r.URL != nil {
- s += len(r.URL.String())
- }
-
- out := make(chan int, 1)
-
- go func() {
- s += len(r.Method)
- s += len(r.Proto)
- for name, values := range r.Header {
- s += len(name)
- for _, value := range values {
- s += len(value)
- }
- }
- s += len(r.Host)
-
- // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
-
- if r.ContentLength != -1 {
- s += int(r.ContentLength)
- }
- out <- s
- close(out)
- }()
-
- return out
-}
-
-type responseWriterDelegator struct {
- http.ResponseWriter
-
- handler, method string
- status int
- written int64
- wroteHeader bool
-}
-
-func (r *responseWriterDelegator) WriteHeader(code int) {
- r.status = code
- r.wroteHeader = true
- r.ResponseWriter.WriteHeader(code)
-}
-
-func (r *responseWriterDelegator) Write(b []byte) (int, error) {
- if !r.wroteHeader {
- r.WriteHeader(http.StatusOK)
- }
- n, err := r.ResponseWriter.Write(b)
- r.written += int64(n)
- return n, err
-}
-
-type fancyResponseWriterDelegator struct {
- *responseWriterDelegator
-}
-
-func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool {
- return f.ResponseWriter.(http.CloseNotifier).CloseNotify()
-}
-
-func (f *fancyResponseWriterDelegator) Flush() {
- f.ResponseWriter.(http.Flusher).Flush()
-}
-
-func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
- return f.ResponseWriter.(http.Hijacker).Hijack()
-}
-
-func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) {
- if !f.wroteHeader {
- f.WriteHeader(http.StatusOK)
- }
- n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r)
- f.written += n
- return n, err
-}
-
-func sanitizeMethod(m string) string {
- switch m {
- case "GET", "get":
- return "get"
- case "PUT", "put":
- return "put"
- case "HEAD", "head":
- return "head"
- case "POST", "post":
- return "post"
- case "DELETE", "delete":
- return "delete"
- case "CONNECT", "connect":
- return "connect"
- case "OPTIONS", "options":
- return "options"
- case "NOTIFY", "notify":
- return "notify"
- default:
- return strings.ToLower(m)
- }
-}
-
-func sanitizeCode(s int) string {
- switch s {
- case 100:
- return "100"
- case 101:
- return "101"
-
- case 200:
- return "200"
- case 201:
- return "201"
- case 202:
- return "202"
- case 203:
- return "203"
- case 204:
- return "204"
- case 205:
- return "205"
- case 206:
- return "206"
-
- case 300:
- return "300"
- case 301:
- return "301"
- case 302:
- return "302"
- case 304:
- return "304"
- case 305:
- return "305"
- case 307:
- return "307"
-
- case 400:
- return "400"
- case 401:
- return "401"
- case 402:
- return "402"
- case 403:
- return "403"
- case 404:
- return "404"
- case 405:
- return "405"
- case 406:
- return "406"
- case 407:
- return "407"
- case 408:
- return "408"
- case 409:
- return "409"
- case 410:
- return "410"
- case 411:
- return "411"
- case 412:
- return "412"
- case 413:
- return "413"
- case 414:
- return "414"
- case 415:
- return "415"
- case 416:
- return "416"
- case 417:
- return "417"
- case 418:
- return "418"
-
- case 500:
- return "500"
- case 501:
- return "501"
- case 502:
- return "502"
- case 503:
- return "503"
- case 504:
- return "504"
- case 505:
- return "505"
-
- case 428:
- return "428"
- case 429:
- return "429"
- case 431:
- return "431"
- case 511:
- return "511"
-
- default:
- return strconv.Itoa(s)
- }
-}
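
With the deprecated handlers removed, exposure and instrumentation live in the promhttp sub-package. A minimal migration sketch; the metric name and routes are examples.

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reqs := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "http_requests_total", Help: "HTTP requests."},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(reqs)

	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})
	// promhttp.InstrumentHandlerCounter takes over from the removed InstrumentHandler.
	http.Handle("/", promhttp.InstrumentHandlerCounter(reqs, hello))
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}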
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
new file mode 100644
index 000000000..351c26e1a
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
@@ -0,0 +1,85 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+ "sort"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// metricSorter is a sortable slice of *dto.Metric.
+type metricSorter []*dto.Metric
+
+func (s metricSorter) Len() int {
+ return len(s)
+}
+
+func (s metricSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s metricSorter) Less(i, j int) bool {
+ if len(s[i].Label) != len(s[j].Label) {
+ // This should not happen. The metrics are
+ // inconsistent. However, we have to deal with the fact, as
+ // people might use custom collectors or metric family injection
+ // to create inconsistent metrics. So let's simply compare the
+ // number of labels in this case. That will still yield
+ // reproducible sorting.
+ return len(s[i].Label) < len(s[j].Label)
+ }
+ for n, lp := range s[i].Label {
+ vi := lp.GetValue()
+ vj := s[j].Label[n].GetValue()
+ if vi != vj {
+ return vi < vj
+ }
+ }
+
+ // We should never arrive here. Multiple metrics with the same
+ // label set in the same scrape will lead to undefined ingestion
+ // behavior. However, as above, we have to provide stable sorting
+ // here, even for inconsistent metrics. So sort equal metrics
+ // by their timestamp, with missing timestamps (implying "now")
+ // coming last.
+ if s[i].TimestampMs == nil {
+ return false
+ }
+ if s[j].TimestampMs == nil {
+ return true
+ }
+ return s[i].GetTimestampMs() < s[j].GetTimestampMs()
+}
+
+// NormalizeMetricFamilies returns a MetricFamily slice with empty
+// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
+// the slice, with the contained Metrics sorted within each MetricFamily.
+func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
+ for _, mf := range metricFamiliesByName {
+ sort.Sort(metricSorter(mf.Metric))
+ }
+ names := make([]string, 0, len(metricFamiliesByName))
+ for name, mf := range metricFamiliesByName {
+ if len(mf.Metric) > 0 {
+ names = append(names, name)
+ }
+ }
+ sort.Strings(names)
+ result := make([]*dto.MetricFamily, 0, len(names))
+ for _, name := range names {
+ result = append(result, metricFamiliesByName[name])
+ }
+ return result
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
index 2502e3734..2744443ac 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
@@ -1,3 +1,16 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package prometheus
import (
@@ -24,9 +37,22 @@ const reservedLabelPrefix = "__"
var errInconsistentCardinality = errors.New("inconsistent label cardinality")
+func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error {
+ return fmt.Errorf(
+ "%s: %q has %d variable labels named %q but %d values %q were provided",
+ errInconsistentCardinality, fqName,
+ len(labels), labels,
+ len(labelValues), labelValues,
+ )
+}
+
func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {
if len(labels) != expectedNumberOfValues {
- return errInconsistentCardinality
+ return fmt.Errorf(
+ "%s: expected %d label values but got %d in %#v",
+ errInconsistentCardinality, expectedNumberOfValues,
+ len(labels), labels,
+ )
}
for name, val := range labels {
@@ -40,7 +66,11 @@ func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {
func validateLabelValues(vals []string, expectedNumberOfValues int) error {
if len(vals) != expectedNumberOfValues {
- return errInconsistentCardinality
+ return fmt.Errorf(
+ "%s: expected %d label values but got %d in %#v",
+ errInconsistentCardinality, expectedNumberOfValues,
+ len(vals), vals,
+ )
}
for _, val := range vals {
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
index 6213ee812..0df1eff88 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
@@ -15,11 +15,15 @@ package prometheus
import (
"strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/prometheus/common/model"
dto "github.com/prometheus/client_model/go"
)
-const separatorByte byte = 255
+var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash.
// A Metric models a single sample value with its meta data being exported to
// Prometheus. Implementations of Metric in this package are Gauge, Counter,
@@ -43,9 +47,8 @@ type Metric interface {
// While populating dto.Metric, it is the responsibility of the
// implementation to ensure validity of the Metric protobuf (like valid
// UTF-8 strings or syntactically valid metric and label names). It is
- // recommended to sort labels lexicographically. (Implementers may find
- // LabelPairSorter useful for that.) Callers of Write should still make
- // sure of sorting if they depend on it.
+ // recommended to sort labels lexicographically. Callers of Write should
+ // still make sure of sorting if they depend on it.
Write(*dto.Metric) error
// TODO(beorn7): The original rationale of passing in a pre-allocated
// dto.Metric protobuf to save allocations has disappeared. The
@@ -57,8 +60,9 @@ type Metric interface {
// implementation XXX has its own XXXOpts type, but in most cases, it is just be
// an alias of this type (which might change when the requirement arises.)
//
-// It is mandatory to set Name and Help to a non-empty string. All other fields
-// are optional and can safely be left at their zero value.
+// It is mandatory to set Name to a non-empty string. All other fields are
+// optional and can safely be left at their zero value, although it is strongly
+// encouraged to set a Help string.
type Opts struct {
// Namespace, Subsystem, and Name are components of the fully-qualified
// name of the Metric (created by joining these components with
@@ -69,7 +73,7 @@ type Opts struct {
Subsystem string
Name string
- // Help provides information about this metric. Mandatory!
+ // Help provides information about this metric.
//
// Metrics with the same fully-qualified name must have the same Help
// string.
@@ -110,37 +114,22 @@ func BuildFQName(namespace, subsystem, name string) string {
return name
}
-// LabelPairSorter implements sort.Interface. It is used to sort a slice of
-// dto.LabelPair pointers. This is useful for implementing the Write method of
-// custom metrics.
-type LabelPairSorter []*dto.LabelPair
+// labelPairSorter implements sort.Interface. It is used to sort a slice of
+// dto.LabelPair pointers.
+type labelPairSorter []*dto.LabelPair
-func (s LabelPairSorter) Len() int {
+func (s labelPairSorter) Len() int {
return len(s)
}
-func (s LabelPairSorter) Swap(i, j int) {
+func (s labelPairSorter) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
-func (s LabelPairSorter) Less(i, j int) bool {
+func (s labelPairSorter) Less(i, j int) bool {
return s[i].GetName() < s[j].GetName()
}
-type hashSorter []uint64
-
-func (s hashSorter) Len() int {
- return len(s)
-}
-
-func (s hashSorter) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-
-func (s hashSorter) Less(i, j int) bool {
- return s[i] < s[j]
-}
-
type invalidMetric struct {
desc *Desc
err error
@@ -156,3 +145,31 @@ func NewInvalidMetric(desc *Desc, err error) Metric {
func (m *invalidMetric) Desc() *Desc { return m.desc }
func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
+
+type timestampedMetric struct {
+ Metric
+ t time.Time
+}
+
+func (m timestampedMetric) Write(pb *dto.Metric) error {
+ e := m.Metric.Write(pb)
+ pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000))
+ return e
+}
+
+// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric so
+// that it has an explicit timestamp set to the provided Time. This is only
+// useful in rare cases as the timestamp of a Prometheus metric should usually
+// be set by the Prometheus server during scraping. Exceptions include mirroring
+// metrics with given timestamps from other metric
+// sources.
+//
+// NewMetricWithTimestamp works best with MustNewConstMetric,
+// MustNewConstHistogram, and MustNewConstSummary, see example.
+//
+// Currently, the exposition formats used by Prometheus are limited to
+// millisecond resolution. Thus, the provided time will be rounded down to the
+// next full millisecond value.
+func NewMetricWithTimestamp(t time.Time, m Metric) Metric {
+ return timestampedMetric{Metric: m, t: t}
+}
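A short usage sketch for the mirroring case the doc comment mentions (not part of the vendored file): the metric name and the emitMirrored helper are hypothetical, and the snippet assumes it runs inside a custom Collector's Collect method.

package collectorsketch

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// mirroredDesc describes a sample mirrored from an external system that
// supplies its own timestamps.
var mirroredDesc = prometheus.NewDesc(
	"external_temperature_celsius",
	"Temperature mirrored from an external source.",
	nil, nil,
)

// emitMirrored wraps a const metric so the external timestamp is exported
// instead of the scrape time.
func emitMirrored(ch chan<- prometheus.Metric, value float64, ts time.Time) {
	m := prometheus.MustNewConstMetric(mirroredDesc, prometheus.GaugeValue, value)
	ch <- prometheus.NewMetricWithTimestamp(ts, m)
}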
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
index 94b2553e1..9b8097942 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
@@ -13,46 +13,61 @@
package prometheus
-import "github.com/prometheus/procfs"
+import (
+ "errors"
+ "os"
+)
type processCollector struct {
- pid int
collectFn func(chan<- Metric)
pidFn func() (int, error)
+ reportErrors bool
cpuTotal *Desc
openFDs, maxFDs *Desc
- vsize, rss *Desc
+ vsize, maxVsize *Desc
+ rss *Desc
startTime *Desc
}
-// NewProcessCollector returns a collector which exports the current state of
-// process metrics including cpu, memory and file descriptor usage as well as
-// the process start time for the given process id under the given namespace.
-func NewProcessCollector(pid int, namespace string) Collector {
- return NewProcessCollectorPIDFn(
- func() (int, error) { return pid, nil },
- namespace,
- )
+// ProcessCollectorOpts defines the behavior of a process metrics collector
+// created with NewProcessCollector.
+type ProcessCollectorOpts struct {
+ // PidFn returns the PID of the process the collector collects metrics
+ // for. It is called upon each collection. By default, the PID of the
+ // current process is used, as determined on construction time by
+ // calling os.Getpid().
+ PidFn func() (int, error)
+ // If non-empty, each of the collected metrics is prefixed by the
+ // provided string and an underscore ("_").
+ Namespace string
+ // If true, any error encountered during collection is reported as an
+ // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored
+ // and the collected metrics will be incomplete. (Possibly, no metrics
+ // will be collected at all.) While that's usually not desired, it is
+ // appropriate for the common "mix-in" of process metrics, where process
+ // metrics are nice to have, but failing to collect them should not
+ // disrupt the collection of the remaining metrics.
+ ReportErrors bool
}
-// NewProcessCollectorPIDFn returns a collector which exports the current state
-// of process metrics including cpu, memory and file descriptor usage as well
-// as the process start time under the given namespace. The given pidFn is
-// called on each collect and is used to determine the process to export
-// metrics for.
-func NewProcessCollectorPIDFn(
- pidFn func() (int, error),
- namespace string,
-) Collector {
+// NewProcessCollector returns a collector which exports the current state of
+// process metrics including CPU, memory and file descriptor usage as well as
+// the process start time. The detailed behavior is defined by the provided
+// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a
+// collector for the current process with an empty namespace string and no error
+// reporting.
+//
+// The collector only works on operating systems with a Linux-style proc
+// filesystem and on Microsoft Windows. On other operating systems, it will not
+// collect any metrics.
+func NewProcessCollector(opts ProcessCollectorOpts) Collector {
ns := ""
- if len(namespace) > 0 {
- ns = namespace + "_"
+ if len(opts.Namespace) > 0 {
+ ns = opts.Namespace + "_"
}
- c := processCollector{
- pidFn: pidFn,
- collectFn: func(chan<- Metric) {},
-
+ c := &processCollector{
+ reportErrors: opts.ReportErrors,
cpuTotal: NewDesc(
ns+"process_cpu_seconds_total",
"Total user and system CPU time spent in seconds.",
@@ -73,6 +88,11 @@ func NewProcessCollectorPIDFn(
"Virtual memory size in bytes.",
nil, nil,
),
+ maxVsize: NewDesc(
+ ns+"process_virtual_memory_max_bytes",
+ "Maximum amount of virtual memory available in bytes.",
+ nil, nil,
+ ),
rss: NewDesc(
ns+"process_resident_memory_bytes",
"Resident memory size in bytes.",
@@ -85,12 +105,23 @@ func NewProcessCollectorPIDFn(
),
}
+ if opts.PidFn == nil {
+ pid := os.Getpid()
+ c.pidFn = func() (int, error) { return pid, nil }
+ } else {
+ c.pidFn = opts.PidFn
+ }
+
// Set up process metric collection if supported by the runtime.
- if _, err := procfs.NewStat(); err == nil {
+ if canCollectProcess() {
c.collectFn = c.processCollect
+ } else {
+ c.collectFn = func(ch chan<- Metric) {
+ c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
+ }
}
- return &c
+ return c
}
// Describe returns all descriptions of the collector.
@@ -99,6 +130,7 @@ func (c *processCollector) Describe(ch chan<- *Desc) {
ch <- c.openFDs
ch <- c.maxFDs
ch <- c.vsize
+ ch <- c.maxVsize
ch <- c.rss
ch <- c.startTime
}
@@ -108,33 +140,12 @@ func (c *processCollector) Collect(ch chan<- Metric) {
c.collectFn(ch)
}
-// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the
-// client allows users to configure the error behavior.
-func (c *processCollector) processCollect(ch chan<- Metric) {
- pid, err := c.pidFn()
- if err != nil {
+func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) {
+ if !c.reportErrors {
return
}
-
- p, err := procfs.NewProc(pid)
- if err != nil {
- return
- }
-
- if stat, err := p.NewStat(); err == nil {
- ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
- ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
- ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
- if startTime, err := stat.StartTime(); err == nil {
- ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
- }
- }
-
- if fds, err := p.FileDescriptorsLen(); err == nil {
- ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
- }
-
- if limits, err := p.NewLimits(); err == nil {
- ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
+ if desc == nil {
+ desc = NewInvalidDesc(err)
}
+ ch <- NewInvalidMetric(desc, err)
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
new file mode 100644
index 000000000..3117461cd
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
@@ -0,0 +1,65 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows
+
+package prometheus
+
+import (
+ "github.com/prometheus/procfs"
+)
+
+func canCollectProcess() bool {
+ _, err := procfs.NewDefaultFS()
+ return err == nil
+}
+
+func (c *processCollector) processCollect(ch chan<- Metric) {
+ pid, err := c.pidFn()
+ if err != nil {
+ c.reportError(ch, nil, err)
+ return
+ }
+
+ p, err := procfs.NewProc(pid)
+ if err != nil {
+ c.reportError(ch, nil, err)
+ return
+ }
+
+ if stat, err := p.Stat(); err == nil {
+ ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
+ ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
+ ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
+ if startTime, err := stat.StartTime(); err == nil {
+ ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
+ } else {
+ c.reportError(ch, c.startTime, err)
+ }
+ } else {
+ c.reportError(ch, nil, err)
+ }
+
+ if fds, err := p.FileDescriptorsLen(); err == nil {
+ ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
+ } else {
+ c.reportError(ch, c.openFDs, err)
+ }
+
+ if limits, err := p.Limits(); err == nil {
+ ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
+ ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace))
+ } else {
+ c.reportError(ch, nil, err)
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go
new file mode 100644
index 000000000..e0b935d1f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go
@@ -0,0 +1,112 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+func canCollectProcess() bool {
+ return true
+}
+
+var (
+ modpsapi = syscall.NewLazyDLL("psapi.dll")
+ modkernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+ procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo")
+ procGetProcessHandleCount = modkernel32.NewProc("GetProcessHandleCount")
+)
+
+type processMemoryCounters struct {
+ // https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-_process_memory_counters_ex
+ _ uint32
+ PageFaultCount uint32
+ PeakWorkingSetSize uint64
+ WorkingSetSize uint64
+ QuotaPeakPagedPoolUsage uint64
+ QuotaPagedPoolUsage uint64
+ QuotaPeakNonPagedPoolUsage uint64
+ QuotaNonPagedPoolUsage uint64
+ PagefileUsage uint64
+ PeakPagefileUsage uint64
+ PrivateUsage uint64
+}
+
+func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) {
+ mem := processMemoryCounters{}
+ r1, _, err := procGetProcessMemoryInfo.Call(
+ uintptr(handle),
+ uintptr(unsafe.Pointer(&mem)),
+ uintptr(unsafe.Sizeof(mem)),
+ )
+ if r1 != 1 {
+ return mem, err
+ } else {
+ return mem, nil
+ }
+}
+
+func getProcessHandleCount(handle windows.Handle) (uint32, error) {
+ var count uint32
+ r1, _, err := procGetProcessHandleCount.Call(
+ uintptr(handle),
+ uintptr(unsafe.Pointer(&count)),
+ )
+ if r1 != 1 {
+ return 0, err
+ } else {
+ return count, nil
+ }
+}
+
+func (c *processCollector) processCollect(ch chan<- Metric) {
+ h, err := windows.GetCurrentProcess()
+ if err != nil {
+ c.reportError(ch, nil, err)
+ return
+ }
+
+ var startTime, exitTime, kernelTime, userTime windows.Filetime
+ err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime)
+ if err != nil {
+ c.reportError(ch, nil, err)
+ return
+ }
+ ch <- MustNewConstMetric(c.startTime, GaugeValue, float64(startTime.Nanoseconds()/1e9))
+ ch <- MustNewConstMetric(c.cpuTotal, CounterValue, fileTimeToSeconds(kernelTime)+fileTimeToSeconds(userTime))
+
+ mem, err := getProcessMemoryInfo(h)
+ if err != nil {
+ c.reportError(ch, nil, err)
+ return
+ }
+ ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(mem.PrivateUsage))
+ ch <- MustNewConstMetric(c.rss, GaugeValue, float64(mem.WorkingSetSize))
+
+ handles, err := getProcessHandleCount(h)
+ if err != nil {
+ c.reportError(ch, nil, err)
+ return
+ }
+ ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(handles))
+ ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process.
+}
+
+func fileTimeToSeconds(ft windows.Filetime) float64 {
+ return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
new file mode 100644
index 000000000..d1354b101
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
@@ -0,0 +1,366 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+ "bufio"
+ "io"
+ "net"
+ "net/http"
+)
+
+const (
+ closeNotifier = 1 << iota
+ flusher
+ hijacker
+ readerFrom
+ pusher
+)
+
+type delegator interface {
+ http.ResponseWriter
+
+ Status() int
+ Written() int64
+}
+
+type responseWriterDelegator struct {
+ http.ResponseWriter
+
+ status int
+ written int64
+ wroteHeader bool
+ observeWriteHeader func(int)
+}
+
+func (r *responseWriterDelegator) Status() int {
+ return r.status
+}
+
+func (r *responseWriterDelegator) Written() int64 {
+ return r.written
+}
+
+func (r *responseWriterDelegator) WriteHeader(code int) {
+ r.status = code
+ r.wroteHeader = true
+ r.ResponseWriter.WriteHeader(code)
+ if r.observeWriteHeader != nil {
+ r.observeWriteHeader(code)
+ }
+}
+
+func (r *responseWriterDelegator) Write(b []byte) (int, error) {
+ // If applicable, call WriteHeader here so that observeWriteHeader is
+ // handled appropriately.
+ if !r.wroteHeader {
+ r.WriteHeader(http.StatusOK)
+ }
+ n, err := r.ResponseWriter.Write(b)
+ r.written += int64(n)
+ return n, err
+}
+
+type closeNotifierDelegator struct{ *responseWriterDelegator }
+type flusherDelegator struct{ *responseWriterDelegator }
+type hijackerDelegator struct{ *responseWriterDelegator }
+type readerFromDelegator struct{ *responseWriterDelegator }
+type pusherDelegator struct{ *responseWriterDelegator }
+
+func (d closeNotifierDelegator) CloseNotify() <-chan bool {
+ //lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to
+ //remove support from client_golang yet.
+ return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+func (d flusherDelegator) Flush() {
+ // If applicable, call WriteHeader here so that observeWriteHeader is
+ // handled appropriately.
+ if !d.wroteHeader {
+ d.WriteHeader(http.StatusOK)
+ }
+ d.ResponseWriter.(http.Flusher).Flush()
+}
+func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ return d.ResponseWriter.(http.Hijacker).Hijack()
+}
+func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
+ // If applicable, call WriteHeader here so that observeWriteHeader is
+ // handled appropriately.
+ if !d.wroteHeader {
+ d.WriteHeader(http.StatusOK)
+ }
+ n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re)
+ d.written += n
+ return n, err
+}
+func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
+ return d.ResponseWriter.(http.Pusher).Push(target, opts)
+}
+
+var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32)
+
+func init() {
+ // TODO(beorn7): Code generation would help here.
+ pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0
+ return d
+ }
+ pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1
+ return closeNotifierDelegator{d}
+ }
+ pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2
+ return flusherDelegator{d}
+ }
+ pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3
+ return struct {
+ *responseWriterDelegator
+ http.Flusher
+ http.CloseNotifier
+ }{d, flusherDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4
+ return hijackerDelegator{d}
+ }
+ pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5
+ return struct {
+ *responseWriterDelegator
+ http.Hijacker
+ http.CloseNotifier
+ }{d, hijackerDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6
+ return struct {
+ *responseWriterDelegator
+ http.Hijacker
+ http.Flusher
+ }{d, hijackerDelegator{d}, flusherDelegator{d}}
+ }
+ pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7
+ return struct {
+ *responseWriterDelegator
+ http.Hijacker
+ http.Flusher
+ http.CloseNotifier
+ }{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8
+ return readerFromDelegator{d}
+ }
+ pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.CloseNotifier
+ }{d, readerFromDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Flusher
+ }{d, readerFromDelegator{d}, flusherDelegator{d}}
+ }
+ pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Flusher
+ http.CloseNotifier
+ }{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Hijacker
+ }{d, readerFromDelegator{d}, hijackerDelegator{d}}
+ }
+ pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Hijacker
+ http.CloseNotifier
+ }{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Hijacker
+ http.Flusher
+ }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
+ }
+ pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Hijacker
+ http.Flusher
+ http.CloseNotifier
+ }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
+ return pusherDelegator{d}
+ }
+ pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.CloseNotifier
+ }{d, pusherDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Flusher
+ }{d, pusherDelegator{d}, flusherDelegator{d}}
+ }
+ pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Flusher
+ http.CloseNotifier
+ }{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Hijacker
+ }{d, pusherDelegator{d}, hijackerDelegator{d}}
+ }
+ pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Hijacker
+ http.CloseNotifier
+ }{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Hijacker
+ http.Flusher
+ }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
+ }
+ pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Hijacker
+ http.Flusher
+ http.CloseNotifier
+ }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ }{d, pusherDelegator{d}, readerFromDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.CloseNotifier
+ }{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Flusher
+ }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Flusher
+ http.CloseNotifier
+ }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Hijacker
+ }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Hijacker
+ http.CloseNotifier
+ }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Hijacker
+ http.Flusher
+ }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Hijacker
+ http.Flusher
+ http.CloseNotifier
+ }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+ }
+}
+
+func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
+ d := &responseWriterDelegator{
+ ResponseWriter: w,
+ observeWriteHeader: observeWriteHeaderFunc,
+ }
+
+ id := 0
+ //lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to
+ //remove support from client_golang yet.
+ if _, ok := w.(http.CloseNotifier); ok {
+ id += closeNotifier
+ }
+ if _, ok := w.(http.Flusher); ok {
+ id += flusher
+ }
+ if _, ok := w.(http.Hijacker); ok {
+ id += hijacker
+ }
+ if _, ok := w.(io.ReaderFrom); ok {
+ id += readerFrom
+ }
+ if _, ok := w.(http.Pusher); ok {
+ id += pusher
+ }
+
+ return pickDelegator[id](d)
+}
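A small sketch of the bitmask scheme used by newDelegator (illustrative only, not part of the vendored file): each optional interface contributes one bit, and the resulting value indexes the 32-entry pickDelegator table built in init. The constants are unexported, so the sketch repeats their numeric values.

package delegatorsketch

import (
	"io"
	"net/http"
)

// interfaceBits mirrors the unexported constants in delegator.go:
// closeNotifier=1, flusher=2, hijacker=4, readerFrom=8, pusher=16.
func interfaceBits(w http.ResponseWriter) int {
	id := 0
	if _, ok := w.(http.CloseNotifier); ok { // deprecated, still supported
		id += 1
	}
	if _, ok := w.(http.Flusher); ok {
		id += 2
	}
	if _, ok := w.(http.Hijacker); ok {
		id += 4
	}
	if _, ok := w.(io.ReaderFrom); ok {
		id += 8
	}
	if _, ok := w.(http.Pusher); ok {
		id += 16
	}
	return id // index into the 32-entry pickDelegator table
}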
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
new file mode 100644
index 000000000..cea5a90fd
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
@@ -0,0 +1,349 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package promhttp provides tooling around HTTP servers and clients.
+//
+// First, the package allows the creation of http.Handler instances to expose
+// Prometheus metrics via HTTP. promhttp.Handler acts on the
+// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a
+// custom registry or anything that implements the Gatherer interface. It also
+// allows the creation of handlers that act differently on errors or allow to
+// log errors.
+//
+// Second, the package provides tooling to instrument instances of http.Handler
+// via middleware. Middleware wrappers follow the naming scheme
+// InstrumentHandlerX, where X describes the intended use of the middleware.
+// See each function's doc comment for specific details.
+//
+// Finally, the package allows for an http.RoundTripper to be instrumented via
+// middleware. Middleware wrappers follow the naming scheme
+// InstrumentRoundTripperX, where X describes the intended use of the
+// middleware. See each function's doc comment for specific details.
+package promhttp
+
+import (
+ "compress/gzip"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/prometheus/common/expfmt"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+ contentTypeHeader = "Content-Type"
+ contentEncodingHeader = "Content-Encoding"
+ acceptEncodingHeader = "Accept-Encoding"
+)
+
+var gzipPool = sync.Pool{
+ New: func() interface{} {
+ return gzip.NewWriter(nil)
+ },
+}
+
+// Handler returns an http.Handler for the prometheus.DefaultGatherer, using
+// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has
+// no error logging, and it applies compression if requested by the client.
+//
+// The returned http.Handler is already instrumented using the
+// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you
+// create multiple http.Handlers by separate calls of the Handler function, the
+// metrics used for instrumentation will be shared between them, providing
+// global scrape counts.
+//
+// This function is meant to cover the bulk of basic use cases. If you are doing
+// anything that requires more customization (including using a non-default
+// Gatherer, different instrumentation, and non-default HandlerOpts), use the
+// HandlerFor function. See there for details.
+func Handler() http.Handler {
+ return InstrumentMetricHandler(
+ prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}),
+ )
+}
+
+// HandlerFor returns an uninstrumented http.Handler for the provided
+// Gatherer. The behavior of the Handler is defined by the provided
+// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom
+// Gatherers, with non-default HandlerOpts, and/or with custom (or no)
+// instrumentation. Use the InstrumentMetricHandler function to apply the same
+// kind of instrumentation as it is used by the Handler function.
+func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
+ var (
+ inFlightSem chan struct{}
+ errCnt = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "promhttp_metric_handler_errors_total",
+ Help: "Total number of internal errors encountered by the promhttp metric handler.",
+ },
+ []string{"cause"},
+ )
+ )
+
+ if opts.MaxRequestsInFlight > 0 {
+ inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight)
+ }
+ if opts.Registry != nil {
+ // Initialize all possibilities that can occur below.
+ errCnt.WithLabelValues("gathering")
+ errCnt.WithLabelValues("encoding")
+ if err := opts.Registry.Register(errCnt); err != nil {
+ if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+ errCnt = are.ExistingCollector.(*prometheus.CounterVec)
+ } else {
+ panic(err)
+ }
+ }
+ }
+
+ h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
+ if inFlightSem != nil {
+ select {
+ case inFlightSem <- struct{}{}: // All good, carry on.
+ defer func() { <-inFlightSem }()
+ default:
+ http.Error(rsp, fmt.Sprintf(
+ "Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight,
+ ), http.StatusServiceUnavailable)
+ return
+ }
+ }
+ mfs, err := reg.Gather()
+ if err != nil {
+ if opts.ErrorLog != nil {
+ opts.ErrorLog.Println("error gathering metrics:", err)
+ }
+ errCnt.WithLabelValues("gathering").Inc()
+ switch opts.ErrorHandling {
+ case PanicOnError:
+ panic(err)
+ case ContinueOnError:
+ if len(mfs) == 0 {
+ // Still report the error if no metrics have been gathered.
+ httpError(rsp, err)
+ return
+ }
+ case HTTPErrorOnError:
+ httpError(rsp, err)
+ return
+ }
+ }
+
+ contentType := expfmt.Negotiate(req.Header)
+ header := rsp.Header()
+ header.Set(contentTypeHeader, string(contentType))
+
+ w := io.Writer(rsp)
+ if !opts.DisableCompression && gzipAccepted(req.Header) {
+ header.Set(contentEncodingHeader, "gzip")
+ gz := gzipPool.Get().(*gzip.Writer)
+ defer gzipPool.Put(gz)
+
+ gz.Reset(w)
+ defer gz.Close()
+
+ w = gz
+ }
+
+ enc := expfmt.NewEncoder(w, contentType)
+
+ var lastErr error
+ for _, mf := range mfs {
+ if err := enc.Encode(mf); err != nil {
+ lastErr = err
+ if opts.ErrorLog != nil {
+ opts.ErrorLog.Println("error encoding and sending metric family:", err)
+ }
+ errCnt.WithLabelValues("encoding").Inc()
+ switch opts.ErrorHandling {
+ case PanicOnError:
+ panic(err)
+ case ContinueOnError:
+ // Handled later.
+ case HTTPErrorOnError:
+ httpError(rsp, err)
+ return
+ }
+ }
+ }
+
+ if lastErr != nil {
+ httpError(rsp, lastErr)
+ }
+ })
+
+ if opts.Timeout <= 0 {
+ return h
+ }
+ return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf(
+ "Exceeded configured timeout of %v.\n",
+ opts.Timeout,
+ ))
+}
+
+// InstrumentMetricHandler is usually used with an http.Handler returned by the
+// HandlerFor function. It instruments the provided http.Handler with two
+// metrics: A counter vector "promhttp_metric_handler_requests_total" to count
+// scrapes partitioned by HTTP status code, and a gauge
+// "promhttp_metric_handler_requests_in_flight" to track the number of
+// simultaneous scrapes. This function idempotently registers collectors for
+// both metrics with the provided Registerer. It panics if the registration
+// fails. The provided metrics are useful to see how many scrapes hit the
+// monitored target (which could be from different Prometheus servers or other
+// scrapers), and how often they overlap (which would result in more than one
+// scrape in flight at the same time). Note that the scrapes-in-flight gauge
+// will contain the scrape by which it is exposed, while the scrape counter will
+// only get incremented after the scrape is complete (as only then the status
+// code is known). For tracking scrape durations, use the
+// "scrape_duration_seconds" gauge created by the Prometheus server upon each
+// scrape.
+func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler {
+ cnt := prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "promhttp_metric_handler_requests_total",
+ Help: "Total number of scrapes by HTTP status code.",
+ },
+ []string{"code"},
+ )
+ // Initialize the most likely HTTP status codes.
+ cnt.WithLabelValues("200")
+ cnt.WithLabelValues("500")
+ cnt.WithLabelValues("503")
+ if err := reg.Register(cnt); err != nil {
+ if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+ cnt = are.ExistingCollector.(*prometheus.CounterVec)
+ } else {
+ panic(err)
+ }
+ }
+
+ gge := prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: "promhttp_metric_handler_requests_in_flight",
+ Help: "Current number of scrapes being served.",
+ })
+ if err := reg.Register(gge); err != nil {
+ if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+ gge = are.ExistingCollector.(prometheus.Gauge)
+ } else {
+ panic(err)
+ }
+ }
+
+ return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler))
+}
+
+// HandlerErrorHandling defines how a Handler serving metrics will handle
+// errors.
+type HandlerErrorHandling int
+
+// These constants cause handlers serving metrics to behave as described if
+// errors are encountered.
+const (
+ // Serve an HTTP status code 500 upon the first error
+ // encountered. Report the error message in the body.
+ HTTPErrorOnError HandlerErrorHandling = iota
+ // Ignore errors and try to serve as many metrics as possible. However,
+ // if no metrics can be served, serve an HTTP status code 500 and the
+ // last error message in the body. Only use this in deliberate "best
+ // effort" metrics collection scenarios. In this case, it is highly
+ // recommended to provide other means of detecting errors: By setting an
+ // ErrorLog in HandlerOpts, the errors are logged. By providing a
+ // Registry in HandlerOpts, the exposed metrics include an error counter
+ // "promhttp_metric_handler_errors_total", which can be used for
+ // alerts.
+ ContinueOnError
+ // Panic upon the first error encountered (useful for "crash only" apps).
+ PanicOnError
+)
+
+// Logger is the minimal interface HandlerOpts needs for logging. Note that
+// log.Logger from the standard library implements this interface, and it is
+// easy to implement by custom loggers, if they don't do so already anyway.
+type Logger interface {
+ Println(v ...interface{})
+}
+
+// HandlerOpts specifies options for how to serve metrics via an http.Handler. The
+// zero value of HandlerOpts is a reasonable default.
+type HandlerOpts struct {
+ // ErrorLog specifies an optional logger for errors collecting and
+ // serving metrics. If nil, errors are not logged at all.
+ ErrorLog Logger
+ // ErrorHandling defines how errors are handled. Note that errors are
+ // logged regardless of the configured ErrorHandling, provided ErrorLog
+ // is not nil.
+ ErrorHandling HandlerErrorHandling
+ // If Registry is not nil, it is used to register a metric
+ // "promhttp_metric_handler_errors_total", partitioned by "cause". A
+ // failed registration causes a panic. Note that this error counter is
+ // different from the instrumentation you get from the various
+ // InstrumentHandler... helpers. It counts errors that don't necessarily
+ // result in a non-2xx HTTP status code. There are two typical cases:
+ // (1) Encoding errors that only happen after streaming of the HTTP body
+ // has already started (and the status code 200 has been sent). This
+ // should only happen with custom collectors. (2) Collection errors with
+ // no effect on the HTTP status code because ErrorHandling is set to
+ // ContinueOnError.
+ Registry prometheus.Registerer
+ // If DisableCompression is true, the handler will never compress the
+ // response, even if requested by the client.
+ DisableCompression bool
+ // The number of concurrent HTTP requests is limited to
+ // MaxRequestsInFlight. Additional requests are responded to with 503
+ // Service Unavailable and a suitable message in the body. If
+ // MaxRequestsInFlight is 0 or negative, no limit is applied.
+ MaxRequestsInFlight int
+ // If handling a request takes longer than Timeout, it is responded to
+ // with 503 Service Unavailable and a suitable message. No timeout is
+ // applied if Timeout is 0 or negative. Note that with the current
+ // implementation, reaching the timeout simply ends the HTTP request as
+ // described above (and even that only if sending of the body hasn't
+ // started yet), while the bulk work of gathering all the metrics keeps
+ // running in the background (with the eventual result to be thrown
+ // away). Until the implementation is improved, it is recommended to
+ // implement a separate timeout in potentially slow Collectors.
+ Timeout time.Duration
+}
+
+// gzipAccepted returns whether the client will accept gzip-encoded content.
+func gzipAccepted(header http.Header) bool {
+ a := header.Get(acceptEncodingHeader)
+ parts := strings.Split(a, ",")
+ for _, part := range parts {
+ part = strings.TrimSpace(part)
+ if part == "gzip" || strings.HasPrefix(part, "gzip;") {
+ return true
+ }
+ }
+ return false
+}
+
+// httpError removes any content-encoding header and then calls http.Error with
+// the provided error and http.StatusInternalServerErrer. Error contents is
+// supposed to be uncompressed plain text. However, same as with a plain
+// http.Error, any header settings will be void if the header has already been
+// sent. The error message will still be written to the writer, but it will
+// probably be of limited use.
+func httpError(rsp http.ResponseWriter, err error) {
+ rsp.Header().Del(contentEncodingHeader)
+ http.Error(
+ rsp,
+ "An error has occurred while serving metrics:\n\n"+err.Error(),
+ http.StatusInternalServerError,
+ )
+}
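A usage sketch combining Handler, HandlerFor, HandlerOpts, and InstrumentMetricHandler (not part of the vendored file): the /custom-metrics path, the serveMetrics helper, and port 9090 are arbitrary choices.

package serversketch

import (
	"log"
	"net/http"
	"os"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func serveMetrics() error {
	// Simple case: default gatherer, default options, already instrumented.
	http.Handle("/metrics", promhttp.Handler())

	// Custom case: a dedicated registry plus explicit HandlerOpts.
	reg := prometheus.NewRegistry()
	h := promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		ErrorLog:            log.New(os.Stderr, "promhttp: ", log.LstdFlags),
		ErrorHandling:       promhttp.ContinueOnError,
		Registry:            reg, // exposes promhttp_metric_handler_errors_total
		MaxRequestsInFlight: 3,
		Timeout:             10 * time.Second,
	})
	http.Handle("/custom-metrics", promhttp.InstrumentMetricHandler(reg, h))

	return http.ListenAndServe(":9090", nil)
}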
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
new file mode 100644
index 000000000..83c49b66a
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
@@ -0,0 +1,219 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+ "crypto/tls"
+ "net/http"
+ "net/http/httptrace"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// The RoundTripperFunc type is an adapter to allow the use of ordinary
+// functions as RoundTrippers. If f is a function with the appropriate
+// signature, RoundTripperFunc(f) is a RoundTripper that calls f.
+type RoundTripperFunc func(req *http.Request) (*http.Response, error)
+
+// RoundTrip implements the RoundTripper interface.
+func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
+ return rt(r)
+}
+
+// InstrumentRoundTripperInFlight is a middleware that wraps the provided
+// http.RoundTripper. It sets the provided prometheus.Gauge to the number of
+// requests currently handled by the wrapped http.RoundTripper.
+//
+// See the example for ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc {
+ return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ gauge.Inc()
+ defer gauge.Dec()
+ return next.RoundTrip(r)
+ })
+}
+
+// InstrumentRoundTripperCounter is a middleware that wraps the provided
+// http.RoundTripper to observe the request result with the provided CounterVec.
+// The CounterVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. Partitioning of the CounterVec happens by HTTP status code
+// and/or HTTP method if the respective instance label names are present in the
+// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
+//
+// If the wrapped RoundTripper panics or returns a non-nil error, the Counter
+// is not incremented.
+//
+// See the example for ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc {
+ code, method := checkLabels(counter)
+
+ return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ resp, err := next.RoundTrip(r)
+ if err == nil {
+ counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc()
+ }
+ return resp, err
+ })
+}
+
+// InstrumentRoundTripperDuration is a middleware that wraps the provided
+// http.RoundTripper to observe the request duration with the provided
+// ObserverVec. The ObserverVec must have zero, one, or two non-const
+// non-curried labels. For those, the only allowed label names are "code" and
+// "method". The function panics otherwise. The Observe method of the Observer
+// in the ObserverVec is called with the request duration in
+// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
+// respective instance label names are present in the ObserverVec. For
+// unpartitioned observations, use an ObserverVec with zero labels. Note that
+// partitioning of Histograms is expensive and should be used judiciously.
+//
+// If the wrapped RoundTripper panics or returns a non-nil error, no values are
+// reported.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc {
+ code, method := checkLabels(obs)
+
+ return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ start := time.Now()
+ resp, err := next.RoundTrip(r)
+ if err == nil {
+ obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds())
+ }
+ return resp, err
+ })
+}
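A client-side sketch chaining the three round-tripper middlewares above (not part of the vendored file): the metric names and the newInstrumentedClient helper are hypothetical, and the label sets stick to the allowed "code" and "method" names.

package clientsketch

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var (
	inFlight = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "client_in_flight_requests",
		Help: "Outgoing HTTP requests currently in flight.",
	})
	apiRequests = prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "client_api_requests_total",
		Help: "Outgoing HTTP requests by status code and method.",
	}, []string{"code", "method"})
	reqDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "client_request_duration_seconds",
		Help:    "Outgoing HTTP request latencies in seconds.",
		Buckets: prometheus.DefBuckets,
	}, []string{"method"})
)

// newInstrumentedClient wraps http.DefaultTransport with the in-flight,
// counter, and duration middlewares, from outermost to innermost.
func newInstrumentedClient() *http.Client {
	prometheus.MustRegister(inFlight, apiRequests, reqDuration)
	rt := promhttp.InstrumentRoundTripperInFlight(inFlight,
		promhttp.InstrumentRoundTripperCounter(apiRequests,
			promhttp.InstrumentRoundTripperDuration(reqDuration, http.DefaultTransport),
		),
	)
	return &http.Client{Transport: rt}
}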
+
+// InstrumentTrace is used to offer flexibility in instrumenting the available
+// httptrace.ClientTrace hook functions. Each function is passed a float64
+// representing the time in seconds since the start of the HTTP request. A user
+// may choose to use separately bucketed Histograms, or implement custom
+// instance labels on a per function basis.
+type InstrumentTrace struct {
+ GotConn func(float64)
+ PutIdleConn func(float64)
+ GotFirstResponseByte func(float64)
+ Got100Continue func(float64)
+ DNSStart func(float64)
+ DNSDone func(float64)
+ ConnectStart func(float64)
+ ConnectDone func(float64)
+ TLSHandshakeStart func(float64)
+ TLSHandshakeDone func(float64)
+ WroteHeaders func(float64)
+ Wait100Continue func(float64)
+ WroteRequest func(float64)
+}
+
+// InstrumentRoundTripperTrace is a middleware that wraps the provided
+// RoundTripper and reports times to hook functions provided in the
+// InstrumentTrace struct. Hook functions that are not present in the provided
+// InstrumentTrace struct are ignored. Times reported to the hook functions are
+// time since the start of the request. Only with Go1.9+, those times are
+// guaranteed to never be negative. (Earlier Go versions do not use a
+// monotonic clock.) Note that partitioning of Histograms is expensive and
+// should be used judiciously.
+//
+// For hook functions that receive an error as an argument, no observations are
+// made in the event of a non-nil error value.
+//
+// See the example for ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
+ return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ start := time.Now()
+
+ trace := &httptrace.ClientTrace{
+ GotConn: func(_ httptrace.GotConnInfo) {
+ if it.GotConn != nil {
+ it.GotConn(time.Since(start).Seconds())
+ }
+ },
+ PutIdleConn: func(err error) {
+ if err != nil {
+ return
+ }
+ if it.PutIdleConn != nil {
+ it.PutIdleConn(time.Since(start).Seconds())
+ }
+ },
+ DNSStart: func(_ httptrace.DNSStartInfo) {
+ if it.DNSStart != nil {
+ it.DNSStart(time.Since(start).Seconds())
+ }
+ },
+ DNSDone: func(_ httptrace.DNSDoneInfo) {
+ if it.DNSDone != nil {
+ it.DNSDone(time.Since(start).Seconds())
+ }
+ },
+ ConnectStart: func(_, _ string) {
+ if it.ConnectStart != nil {
+ it.ConnectStart(time.Since(start).Seconds())
+ }
+ },
+ ConnectDone: func(_, _ string, err error) {
+ if err != nil {
+ return
+ }
+ if it.ConnectDone != nil {
+ it.ConnectDone(time.Since(start).Seconds())
+ }
+ },
+ GotFirstResponseByte: func() {
+ if it.GotFirstResponseByte != nil {
+ it.GotFirstResponseByte(time.Since(start).Seconds())
+ }
+ },
+ Got100Continue: func() {
+ if it.Got100Continue != nil {
+ it.Got100Continue(time.Since(start).Seconds())
+ }
+ },
+ TLSHandshakeStart: func() {
+ if it.TLSHandshakeStart != nil {
+ it.TLSHandshakeStart(time.Since(start).Seconds())
+ }
+ },
+ TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
+ if err != nil {
+ return
+ }
+ if it.TLSHandshakeDone != nil {
+ it.TLSHandshakeDone(time.Since(start).Seconds())
+ }
+ },
+ WroteHeaders: func() {
+ if it.WroteHeaders != nil {
+ it.WroteHeaders(time.Since(start).Seconds())
+ }
+ },
+ Wait100Continue: func() {
+ if it.Wait100Continue != nil {
+ it.Wait100Continue(time.Since(start).Seconds())
+ }
+ },
+ WroteRequest: func(_ httptrace.WroteRequestInfo) {
+ if it.WroteRequest != nil {
+ it.WroteRequest(time.Since(start).Seconds())
+ }
+ },
+ }
+ r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace))
+
+ return next.RoundTrip(r)
+ })
+}
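A sketch of the trace hooks (not part of the vendored file): the "event" label and the newTracedClient helper are hypothetical, and since the histogram is observed directly by the hooks rather than through checkLabels, it is not limited to "code"/"method" labels.

package tracesketch

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// dnsLatency records the time since request start at which DNS events fire.
var dnsLatency = prometheus.NewHistogramVec(prometheus.HistogramOpts{
	Name:    "client_dns_duration_seconds",
	Help:    "Time since request start when DNS events fire, in seconds.",
	Buckets: []float64{0.005, 0.01, 0.025, 0.05},
}, []string{"event"})

func newTracedClient() *http.Client {
	prometheus.MustRegister(dnsLatency)
	trace := &promhttp.InstrumentTrace{
		DNSStart: func(t float64) { dnsLatency.WithLabelValues("dns_start").Observe(t) },
		DNSDone:  func(t float64) { dnsLatency.WithLabelValues("dns_done").Observe(t) },
	}
	return &http.Client{
		Transport: promhttp.InstrumentRoundTripperTrace(trace, http.DefaultTransport),
	}
}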
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
new file mode 100644
index 000000000..9db243805
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
@@ -0,0 +1,447 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+ "errors"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// magicString is used for the hacky label test in checkLabels. Remove once fixed.
+const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa"
+
+// InstrumentHandlerInFlight is a middleware that wraps the provided
+// http.Handler. It sets the provided prometheus.Gauge to the number of
+// requests currently handled by the wrapped http.Handler.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ g.Inc()
+ defer g.Dec()
+ next.ServeHTTP(w, r)
+ })
+}
+
+// InstrumentHandlerDuration is a middleware that wraps the provided
+// http.Handler to observe the request duration with the provided ObserverVec.
+// The ObserverVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. The Observe method of the Observer in the ObserverVec is
+// called with the request duration in seconds. Partitioning happens by HTTP
+// status code and/or HTTP method if the respective instance label names are
+// present in the ObserverVec. For unpartitioned observations, use an
+// ObserverVec with zero labels. Note that partitioning of Histograms is
+// expensive and should be used judiciously.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, no values are reported.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+ code, method := checkLabels(obs)
+
+ if code {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+ d := newDelegator(w, nil)
+ next.ServeHTTP(d, r)
+
+ obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds())
+ })
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+ next.ServeHTTP(w, r)
+ obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds())
+ })
+}
+
+// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler
+// to observe the request result with the provided CounterVec. The CounterVec
+// must have zero, one, or two non-const non-curried labels. For those, the only
+// allowed label names are "code" and "method". The function panics
+// otherwise. Partitioning of the CounterVec happens by HTTP status code and/or
+// HTTP method if the respective instance label names are present in the
+// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, the Counter is not incremented.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc {
+ code, method := checkLabels(counter)
+
+ if code {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ d := newDelegator(w, nil)
+ next.ServeHTTP(d, r)
+ counter.With(labels(code, method, r.Method, d.Status())).Inc()
+ })
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ next.ServeHTTP(w, r)
+ counter.With(labels(code, method, r.Method, 0)).Inc()
+ })
+}
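A server-side sketch chaining the counter and duration middlewares around an application handler (not part of the vendored file): the metric names and the instrument helper are hypothetical.

package serversketch

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var (
	httpRequests = prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "app_http_requests_total",
		Help: "Handled HTTP requests by status code and method.",
	}, []string{"code", "method"})
	httpDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "app_http_request_duration_seconds",
		Help:    "Handled HTTP request latencies in seconds.",
		Buckets: prometheus.DefBuckets,
	}, []string{"method"})
)

// instrument wraps h; the delegator tracks the response status so the
// "code" label can be filled in after the handler returns.
func instrument(h http.Handler) http.Handler {
	prometheus.MustRegister(httpRequests, httpDuration)
	return promhttp.InstrumentHandlerCounter(httpRequests,
		promhttp.InstrumentHandlerDuration(httpDuration, h),
	)
}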
+
+// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided
+// http.Handler to observe with the provided ObserverVec the request duration
+// until the response headers are written. The ObserverVec must have zero, one,
+// or two non-const non-curried labels. For those, the only allowed label names
+// are "code" and "method". The function panics otherwise. The Observe method of
+// the Observer in the ObserverVec is called with the request duration in
+// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
+// respective instance label names are present in the ObserverVec. For
+// unpartitioned observations, use an ObserverVec with zero labels. Note that
+// partitioning of Histograms is expensive and should be used judiciously.
+//
+// If the wrapped Handler panics before calling WriteHeader, no value is
+// reported.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+ code, method := checkLabels(obs)
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+ d := newDelegator(w, func(status int) {
+ obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds())
+ })
+ next.ServeHTTP(d, r)
+ })
+}
+
+// InstrumentHandlerRequestSize is a middleware that wraps the provided
+// http.Handler to observe the request size with the provided ObserverVec. The
+// ObserverVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. The Observe method of the Observer in the ObserverVec is
+// called with the request size in bytes. Partitioning happens by HTTP status
+// code and/or HTTP method if the respective instance label names are present in
+// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero
+// labels. Note that partitioning of Histograms is expensive and should be used
+// judiciously.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, no values are reported.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+ code, method := checkLabels(obs)
+
+ if code {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ d := newDelegator(w, nil)
+ next.ServeHTTP(d, r)
+ size := computeApproximateRequestSize(r)
+ obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size))
+ })
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ next.ServeHTTP(w, r)
+ size := computeApproximateRequestSize(r)
+ obs.With(labels(code, method, r.Method, 0)).Observe(float64(size))
+ })
+}
+
+// InstrumentHandlerResponseSize is a middleware that wraps the provided
+// http.Handler to observe the response size with the provided ObserverVec. The
+// ObserverVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. The Observe method of the Observer in the ObserverVec is
+// called with the response size in bytes. Partitioning happens by HTTP status
+// code and/or HTTP method if the respective instance label names are present in
+// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero
+// labels. Note that partitioning of Histograms is expensive and should be used
+// judiciously.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, no values are reported.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler {
+ code, method := checkLabels(obs)
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ d := newDelegator(w, nil)
+ next.ServeHTTP(d, r)
+ obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written()))
+ })
+}
+
+func checkLabels(c prometheus.Collector) (code bool, method bool) {
+ // TODO(beorn7): Remove this hacky way to check for instance labels
+ // once Descriptors can have their dimensionality queried.
+ var (
+ desc *prometheus.Desc
+ m prometheus.Metric
+ pm dto.Metric
+ lvs []string
+ )
+
+ // Get the Desc from the Collector.
+ descc := make(chan *prometheus.Desc, 1)
+ c.Describe(descc)
+
+ select {
+ case desc = <-descc:
+ default:
+ panic("no description provided by collector")
+ }
+ select {
+ case <-descc:
+ panic("more than one description provided by collector")
+ default:
+ }
+
+ close(descc)
+
+ // Create a ConstMetric with the Desc. Since we don't know how many
+ // variable labels there are, try for as long as it needs.
+ for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) {
+ m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...)
+ }
+
+ // Write out the metric into a proto message and look at the labels.
+ // If the value is not the magicString, it is a constLabel, which doesn't interest us.
+ // If the label is curried, it doesn't interest us.
+ // In all other cases, only "code" or "method" is allowed.
+ if err := m.Write(&pm); err != nil {
+ panic("error checking metric for labels")
+ }
+ for _, label := range pm.Label {
+ name, value := label.GetName(), label.GetValue()
+ if value != magicString || isLabelCurried(c, name) {
+ continue
+ }
+ switch name {
+ case "code":
+ code = true
+ case "method":
+ method = true
+ default:
+ panic("metric partitioned with non-supported labels")
+ }
+ }
+ return
+}
+
+func isLabelCurried(c prometheus.Collector, label string) bool {
+ // This is even hackier than the label test above.
+ // We essentially try to curry again and see if it works.
+ // But for that, we need to type-convert to the two
+ // types we use here, ObserverVec or *CounterVec.
+ switch v := c.(type) {
+ case *prometheus.CounterVec:
+ if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
+ return false
+ }
+ case prometheus.ObserverVec:
+ if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
+ return false
+ }
+ default:
+ panic("unsupported metric vec type")
+ }
+ return true
+}
+
+// emptyLabels is a one-time allocation for non-partitioned metrics to avoid
+// unnecessary allocations on each request.
+var emptyLabels = prometheus.Labels{}
+
+func labels(code, method bool, reqMethod string, status int) prometheus.Labels {
+ if !(code || method) {
+ return emptyLabels
+ }
+ labels := prometheus.Labels{}
+
+ if code {
+ labels["code"] = sanitizeCode(status)
+ }
+ if method {
+ labels["method"] = sanitizeMethod(reqMethod)
+ }
+
+ return labels
+}
+
+func computeApproximateRequestSize(r *http.Request) int {
+ s := 0
+ if r.URL != nil {
+ s += len(r.URL.String())
+ }
+
+ s += len(r.Method)
+ s += len(r.Proto)
+ for name, values := range r.Header {
+ s += len(name)
+ for _, value := range values {
+ s += len(value)
+ }
+ }
+ s += len(r.Host)
+
+ // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
+
+ if r.ContentLength != -1 {
+ s += int(r.ContentLength)
+ }
+ return s
+}
+
+func sanitizeMethod(m string) string {
+ switch m {
+ case "GET", "get":
+ return "get"
+ case "PUT", "put":
+ return "put"
+ case "HEAD", "head":
+ return "head"
+ case "POST", "post":
+ return "post"
+ case "DELETE", "delete":
+ return "delete"
+ case "CONNECT", "connect":
+ return "connect"
+ case "OPTIONS", "options":
+ return "options"
+ case "NOTIFY", "notify":
+ return "notify"
+ default:
+ return strings.ToLower(m)
+ }
+}
+
+// If the wrapped http.Handler has not set a status code, i.e. the value is
+// currently 0, sanitizeCode will return 200, for consistency with behavior in
+// the stdlib.
+func sanitizeCode(s int) string {
+ switch s {
+ case 100:
+ return "100"
+ case 101:
+ return "101"
+
+ case 200, 0:
+ return "200"
+ case 201:
+ return "201"
+ case 202:
+ return "202"
+ case 203:
+ return "203"
+ case 204:
+ return "204"
+ case 205:
+ return "205"
+ case 206:
+ return "206"
+
+ case 300:
+ return "300"
+ case 301:
+ return "301"
+ case 302:
+ return "302"
+ case 304:
+ return "304"
+ case 305:
+ return "305"
+ case 307:
+ return "307"
+
+ case 400:
+ return "400"
+ case 401:
+ return "401"
+ case 402:
+ return "402"
+ case 403:
+ return "403"
+ case 404:
+ return "404"
+ case 405:
+ return "405"
+ case 406:
+ return "406"
+ case 407:
+ return "407"
+ case 408:
+ return "408"
+ case 409:
+ return "409"
+ case 410:
+ return "410"
+ case 411:
+ return "411"
+ case 412:
+ return "412"
+ case 413:
+ return "413"
+ case 414:
+ return "414"
+ case 415:
+ return "415"
+ case 416:
+ return "416"
+ case 417:
+ return "417"
+ case 418:
+ return "418"
+
+ case 500:
+ return "500"
+ case 501:
+ return "501"
+ case 502:
+ return "502"
+ case 503:
+ return "503"
+ case 504:
+ return "504"
+ case 505:
+ return "505"
+
+ case 428:
+ return "428"
+ case 429:
+ return "429"
+ case 431:
+ return "431"
+ case 511:
+ return "511"
+
+ default:
+ return strconv.Itoa(s)
+ }
+}
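
The middlewares above are meant to be chained around an application handler, each reporting to its own collector. The following sketch shows one plausible chaining using only the APIs documented in this file; the metric names and the /api handler are illustrative assumptions, not part of the vendored change itself.

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Histogram partitioned by "code" and "method", as permitted by checkLabels.
	duration := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "api_request_duration_seconds",
		Help:    "Request latency, partitioned by status code and method.",
		Buckets: prometheus.DefBuckets,
	}, []string{"code", "method"})
	// Counter partitioned by "code" only.
	requests := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "api_requests_total",
		Help: "Total requests, partitioned by status code.",
	}, []string{"code"})
	prometheus.MustRegister(duration, requests)

	api := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	// Outer middleware observes the full duration; the inner one counts results.
	handler := promhttp.InstrumentHandlerDuration(duration,
		promhttp.InstrumentHandlerCounter(requests, api))

	http.Handle("/api", handler)
	http.Handle("/metrics", promhttp.Handler())
	http.ListenAndServe(":8080", nil)
}
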
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
index 73dde7beb..c05d6ee1b 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -15,17 +15,23 @@ package prometheus
import (
"bytes"
- "errors"
"fmt"
+ "io/ioutil"
"os"
+ "path/filepath"
"runtime"
"sort"
+ "strings"
"sync"
"unicode/utf8"
+ "github.com/cespare/xxhash/v2"
"github.com/golang/protobuf/proto"
+ "github.com/prometheus/common/expfmt"
dto "github.com/prometheus/client_model/go"
+
+ "github.com/prometheus/client_golang/prometheus/internal"
)
const (
@@ -37,13 +43,14 @@ const (
// DefaultRegisterer and DefaultGatherer are the implementations of the
// Registerer and Gatherer interface a number of convenience functions in this
// package act on. Initially, both variables point to the same Registry, which
-// has a process collector (see NewProcessCollector) and a Go collector (see
-// NewGoCollector) already registered. This approach to keep default instances
-// as global state mirrors the approach of other packages in the Go standard
-// library. Note that there are caveats. Change the variables with caution and
-// only if you understand the consequences. Users who want to avoid global state
-// altogether should not use the convenience function and act on custom
-// instances instead.
+// has a process collector (currently on Linux only, see NewProcessCollector)
+// and a Go collector (see NewGoCollector, in particular the note about
+// stop-the-world implication with Go versions older than 1.9) already
+// registered. This approach to keep default instances as global state mirrors
+// the approach of other packages in the Go standard library. Note that there
+// are caveats. Change the variables with caution and only if you understand the
+// consequences. Users who want to avoid global state altogether should not use
+// the convenience functions and act on custom instances instead.
var (
defaultRegistry = NewRegistry()
DefaultRegisterer Registerer = defaultRegistry
@@ -51,7 +58,7 @@ var (
)
func init() {
- MustRegister(NewProcessCollector(os.Getpid(), ""))
+ MustRegister(NewProcessCollector(ProcessCollectorOpts{}))
MustRegister(NewGoCollector())
}
@@ -67,7 +74,8 @@ func NewRegistry() *Registry {
// NewPedanticRegistry returns a registry that checks during collection if each
// collected Metric is consistent with its reported Desc, and if the Desc has
-// actually been registered with the registry.
+// actually been registered with the registry. Unchecked Collectors (those whose
+// Describe method does not yield any descriptors) are excluded from the check.
//
// Usually, a Registry will be happy as long as the union of all collected
// Metrics is consistent and valid even if some metrics are not consistent with
@@ -97,8 +105,13 @@ type Registerer interface {
// returned error is an instance of AlreadyRegisteredError, which
// contains the previously registered Collector.
//
- // It is in general not safe to register the same Collector multiple
- // times concurrently.
+ // A Collector whose Describe method does not yield any Desc is treated
+ // as unchecked. Registration will always succeed. No check for
+ // re-registering (see previous paragraph) is performed. Thus, the
+ // caller is responsible for not double-registering the same unchecked
+ // Collector, and for providing a Collector that will not cause
+ // inconsistent metrics on collection. (This would lead to scrape
+ // errors.)
Register(Collector) error
// MustRegister works like Register but registers any number of
// Collectors and panics upon the first registration that causes an
@@ -107,7 +120,9 @@ type Registerer interface {
// Unregister unregisters the Collector that equals the Collector passed
// in as an argument. (Two Collectors are considered equal if their
// Describe method yields the same set of descriptors.) The function
- // returns whether a Collector was unregistered.
+ // returns whether a Collector was unregistered. Note that an unchecked
+ // Collector cannot be unregistered (as its Describe method does not
+ // yield any descriptor).
//
// Note that even after unregistering, it will not be possible to
// register a new Collector that is inconsistent with the unregistered
@@ -125,15 +140,23 @@ type Registerer interface {
type Gatherer interface {
// Gather calls the Collect method of the registered Collectors and then
// gathers the collected metrics into a lexicographically sorted slice
- // of MetricFamily protobufs. Even if an error occurs, Gather attempts
- // to gather as many metrics as possible. Hence, if a non-nil error is
- // returned, the returned MetricFamily slice could be nil (in case of a
- // fatal error that prevented any meaningful metric collection) or
- // contain a number of MetricFamily protobufs, some of which might be
- // incomplete, and some might be missing altogether. The returned error
- // (which might be a MultiError) explains the details. In scenarios
- // where complete collection is critical, the returned MetricFamily
- // protobufs should be disregarded if the returned error is non-nil.
+ // of uniquely named MetricFamily protobufs. Gather ensures that the
+ // returned slice is valid and self-consistent so that it can be used
+ // for valid exposition. As an exception to the strict consistency
+ // requirements described for metric.Desc, Gather will tolerate
+ // different sets of label names for metrics of the same metric family.
+ //
+ // Even if an error occurs, Gather attempts to gather as many metrics as
+ // possible. Hence, if a non-nil error is returned, the returned
+ // MetricFamily slice could be nil (in case of a fatal error that
+ // prevented any meaningful metric collection) or contain a number of
+ // MetricFamily protobufs, some of which might be incomplete, and some
+ // might be missing altogether. The returned error (which might be a
+ // MultiError) explains the details. Note that this is mostly useful for
+ // debugging purposes. If the gathered protobufs are to be used for
+ // exposition in actual monitoring, it is almost always better to not
+ // expose an incomplete result and instead disregard the returned
+ // MetricFamily protobufs in case the returned error is non-nil.
Gather() ([]*dto.MetricFamily, error)
}
@@ -234,6 +257,7 @@ type Registry struct {
collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
descIDs map[uint64]struct{}
dimHashesByName map[string]uint64
+ uncheckedCollectors []Collector
pedanticChecksEnabled bool
}
@@ -243,7 +267,7 @@ func (r *Registry) Register(c Collector) error {
descChan = make(chan *Desc, capDescChan)
newDescIDs = map[uint64]struct{}{}
newDimHashesByName = map[string]uint64{}
- collectorID uint64 // Just a sum of all desc IDs.
+ collectorID uint64 // All desc IDs XOR'd together.
duplicateDescErr error
)
go func() {
@@ -251,7 +275,12 @@ func (r *Registry) Register(c Collector) error {
close(descChan)
}()
r.mtx.Lock()
- defer r.mtx.Unlock()
+ defer func() {
+ // Drain channel in case of premature return to not leak a goroutine.
+ for range descChan {
+ }
+ r.mtx.Unlock()
+ }()
// Conduct various tests...
for desc := range descChan {
@@ -265,12 +294,12 @@ func (r *Registry) Register(c Collector) error {
if _, exists := r.descIDs[desc.id]; exists {
duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
}
- // If it is not a duplicate desc in this collector, add it to
+ // If it is not a duplicate desc in this collector, XOR it to
// the collectorID. (We allow duplicate descs within the same
// collector, but their existence must be a no-op.)
if _, exists := newDescIDs[desc.id]; !exists {
newDescIDs[desc.id] = struct{}{}
- collectorID += desc.id
+ collectorID ^= desc.id
}
// Are all the label names and the help string consistent with
@@ -291,14 +320,23 @@ func (r *Registry) Register(c Collector) error {
}
}
}
- // Did anything happen at all?
+ // A Collector yielding no Desc at all is considered unchecked.
if len(newDescIDs) == 0 {
- return errors.New("collector has no descriptors")
+ r.uncheckedCollectors = append(r.uncheckedCollectors, c)
+ return nil
}
if existing, exists := r.collectorsByID[collectorID]; exists {
- return AlreadyRegisteredError{
- ExistingCollector: existing,
- NewCollector: c,
+ switch e := existing.(type) {
+ case *wrappingCollector:
+ return AlreadyRegisteredError{
+ ExistingCollector: e.unwrapRecursively(),
+ NewCollector: c,
+ }
+ default:
+ return AlreadyRegisteredError{
+ ExistingCollector: e,
+ NewCollector: c,
+ }
}
}
// If the collectorID is new, but at least one of the descs existed
@@ -323,7 +361,7 @@ func (r *Registry) Unregister(c Collector) bool {
var (
descChan = make(chan *Desc, capDescChan)
descIDs = map[uint64]struct{}{}
- collectorID uint64 // Just a sum of the desc IDs.
+ collectorID uint64 // All desc IDs XOR'd together.
)
go func() {
c.Describe(descChan)
@@ -331,7 +369,7 @@ func (r *Registry) Unregister(c Collector) bool {
}()
for desc := range descChan {
if _, exists := descIDs[desc.id]; !exists {
- collectorID += desc.id
+ collectorID ^= desc.id
descIDs[desc.id] = struct{}{}
}
}
@@ -367,20 +405,24 @@ func (r *Registry) MustRegister(cs ...Collector) {
// Gather implements Gatherer.
func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
var (
- metricChan = make(chan Metric, capMetricChan)
- metricHashes = map[uint64]struct{}{}
- dimHashes = map[string]uint64{}
- wg sync.WaitGroup
- errs MultiError // The collected errors to return in the end.
- registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
+ checkedMetricChan = make(chan Metric, capMetricChan)
+ uncheckedMetricChan = make(chan Metric, capMetricChan)
+ metricHashes = map[uint64]struct{}{}
+ wg sync.WaitGroup
+ errs MultiError // The collected errors to return in the end.
+ registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
)
r.mtx.RLock()
- goroutineBudget := len(r.collectorsByID)
+ goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors)
metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
- collectors := make(chan Collector, len(r.collectorsByID))
+ checkedCollectors := make(chan Collector, len(r.collectorsByID))
+ uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors))
for _, collector := range r.collectorsByID {
- collectors <- collector
+ checkedCollectors <- collector
+ }
+ for _, collector := range r.uncheckedCollectors {
+ uncheckedCollectors <- collector
}
// In case pedantic checks are enabled, we have to copy the map before
// giving up the RLock.
@@ -397,12 +439,14 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
collectWorker := func() {
for {
select {
- case collector := <-collectors:
- collector.Collect(metricChan)
- wg.Done()
+ case collector := <-checkedCollectors:
+ collector.Collect(checkedMetricChan)
+ case collector := <-uncheckedCollectors:
+ collector.Collect(uncheckedMetricChan)
default:
return
}
+ wg.Done()
}
}
@@ -410,53 +454,128 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
go collectWorker()
goroutineBudget--
- // Close the metricChan once all collectors are collected.
+ // Close checkedMetricChan and uncheckedMetricChan once all collectors
+ // are collected.
go func() {
wg.Wait()
- close(metricChan)
+ close(checkedMetricChan)
+ close(uncheckedMetricChan)
}()
- // Drain metricChan in case of premature return.
+ // Drain checkedMetricChan and uncheckedMetricChan in case of premature return.
defer func() {
- for range metricChan {
+ if checkedMetricChan != nil {
+ for range checkedMetricChan {
+ }
+ }
+ if uncheckedMetricChan != nil {
+ for range uncheckedMetricChan {
+ }
}
}()
-collectLoop:
+ // Copy the channel references so we can nil them out later to remove
+ // them from the select statements below.
+ cmc := checkedMetricChan
+ umc := uncheckedMetricChan
+
for {
select {
- case metric, ok := <-metricChan:
+ case metric, ok := <-cmc:
if !ok {
- // metricChan is closed, we are done.
- break collectLoop
+ cmc = nil
+ break
}
errs.Append(processMetric(
metric, metricFamiliesByName,
- metricHashes, dimHashes,
+ metricHashes,
registeredDescIDs,
))
+ case metric, ok := <-umc:
+ if !ok {
+ umc = nil
+ break
+ }
+ errs.Append(processMetric(
+ metric, metricFamiliesByName,
+ metricHashes,
+ nil,
+ ))
default:
- if goroutineBudget <= 0 || len(collectors) == 0 {
- // All collectors are aleady being worked on or
+ if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 {
+ // All collectors are already being worked on or
// we have already as many goroutines started as
- // there are collectors. Just process metrics
- // from now on.
- for metric := range metricChan {
+ // there are collectors. Do the same as above,
+ // just without the default.
+ select {
+ case metric, ok := <-cmc:
+ if !ok {
+ cmc = nil
+ break
+ }
errs.Append(processMetric(
metric, metricFamiliesByName,
- metricHashes, dimHashes,
+ metricHashes,
registeredDescIDs,
))
+ case metric, ok := <-umc:
+ if !ok {
+ umc = nil
+ break
+ }
+ errs.Append(processMetric(
+ metric, metricFamiliesByName,
+ metricHashes,
+ nil,
+ ))
}
- break collectLoop
+ break
}
// Start more workers.
go collectWorker()
goroutineBudget--
runtime.Gosched()
}
+ // Once both checkedMetricChan and uncheckedMetricChan are closed
+ // and drained, the contraption above will nil out cmc and umc,
+ // and then we can leave the collect loop here.
+ if cmc == nil && umc == nil {
+ break
+ }
+ }
+ return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
+
+// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the
+// Prometheus text format, and writes it to a temporary file. Upon success, the
+// temporary file is renamed to the provided filename.
+//
+// This is intended for use with the textfile collector of the node exporter.
+// Note that the node exporter expects the filename to be suffixed with ".prom".
+func WriteToTextfile(filename string, g Gatherer) error {
+ tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename))
+ if err != nil {
+ return err
+ }
+ defer os.Remove(tmp.Name())
+
+ mfs, err := g.Gather()
+ if err != nil {
+ return err
}
- return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+ for _, mf := range mfs {
+ if _, err := expfmt.MetricFamilyToText(tmp, mf); err != nil {
+ return err
+ }
+ }
+ if err := tmp.Close(); err != nil {
+ return err
+ }
+
+ if err := os.Chmod(tmp.Name(), 0644); err != nil {
+ return err
+ }
+ return os.Rename(tmp.Name(), filename)
}
// processMetric is an internal helper method only used by the Gather method.
@@ -464,16 +583,20 @@ func processMetric(
metric Metric,
metricFamiliesByName map[string]*dto.MetricFamily,
metricHashes map[uint64]struct{},
- dimHashes map[string]uint64,
registeredDescIDs map[uint64]struct{},
) error {
desc := metric.Desc()
+ // Wrapped metrics collected by an unchecked Collector can have an
+ // invalid Desc.
+ if desc.err != nil {
+ return desc.err
+ }
dtoMetric := &dto.Metric{}
if err := metric.Write(dtoMetric); err != nil {
return fmt.Errorf("error collecting metric %v: %s", desc, err)
}
metricFamily, ok := metricFamiliesByName[desc.fqName]
- if ok {
+ if ok { // Existing name.
if metricFamily.GetHelp() != desc.help {
return fmt.Errorf(
"collected metric %s %s has help %q but should have %q",
@@ -520,7 +643,7 @@ func processMetric(
default:
panic("encountered MetricFamily with invalid type")
}
- } else {
+ } else { // New name.
metricFamily = &dto.MetricFamily{}
metricFamily.Name = proto.String(desc.fqName)
metricFamily.Help = proto.String(desc.help)
@@ -539,9 +662,12 @@ func processMetric(
default:
return fmt.Errorf("empty metric collected: %s", dtoMetric)
}
+ if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil {
+ return err
+ }
metricFamiliesByName[desc.fqName] = metricFamily
}
- if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil {
+ if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil {
return err
}
if registeredDescIDs != nil {
@@ -563,7 +689,7 @@ func processMetric(
// Gatherers is a slice of Gatherer instances that implements the Gatherer
// interface itself. Its Gather method calls Gather on all Gatherers in the
// slice in order and returns the merged results. Errors returned from the
-// Gather calles are all returned in a flattened MultiError. Duplicate and
+// Gather calls are all returned in a flattened MultiError. Duplicate and
// inconsistent Metrics are skipped (first occurrence in slice order wins) and
// reported in the returned error.
//
@@ -583,7 +709,6 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
var (
metricFamiliesByName = map[string]*dto.MetricFamily{}
metricHashes = map[uint64]struct{}{}
- dimHashes = map[string]uint64{}
errs MultiError // The collected errors to return in the end.
)
@@ -620,10 +745,14 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
existingMF.Name = mf.Name
existingMF.Help = mf.Help
existingMF.Type = mf.Type
+ if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil {
+ errs = append(errs, err)
+ continue
+ }
metricFamiliesByName[mf.GetName()] = existingMF
}
for _, m := range mf.Metric {
- if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil {
+ if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil {
errs = append(errs, err)
continue
}
@@ -631,88 +760,80 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
}
}
}
- return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
-}
-
-// metricSorter is a sortable slice of *dto.Metric.
-type metricSorter []*dto.Metric
-
-func (s metricSorter) Len() int {
- return len(s)
+ return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
}
-func (s metricSorter) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-
-func (s metricSorter) Less(i, j int) bool {
- if len(s[i].Label) != len(s[j].Label) {
- // This should not happen. The metrics are
- // inconsistent. However, we have to deal with the fact, as
- // people might use custom collectors or metric family injection
- // to create inconsistent metrics. So let's simply compare the
- // number of labels in this case. That will still yield
- // reproducible sorting.
- return len(s[i].Label) < len(s[j].Label)
- }
- for n, lp := range s[i].Label {
- vi := lp.GetValue()
- vj := s[j].Label[n].GetValue()
- if vi != vj {
- return vi < vj
- }
- }
-
- // We should never arrive here. Multiple metrics with the same
- // label set in the same scrape will lead to undefined ingestion
- // behavior. However, as above, we have to provide stable sorting
- // here, even for inconsistent metrics. So sort equal metrics
- // by their timestamp, with missing timestamps (implying "now")
- // coming last.
- if s[i].TimestampMs == nil {
- return false
- }
- if s[j].TimestampMs == nil {
- return true
- }
- return s[i].GetTimestampMs() < s[j].GetTimestampMs()
-}
-
-// normalizeMetricFamilies returns a MetricFamily slice with empty
-// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
-// the slice, with the contained Metrics sorted within each MetricFamily.
-func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
- for _, mf := range metricFamiliesByName {
- sort.Sort(metricSorter(mf.Metric))
+// checkSuffixCollisions checks for collisions with the “magic” suffixes the
+// Prometheus text format and the internal metric representation of the
+// Prometheus server add while flattening Summaries and Histograms.
+func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error {
+ var (
+ newName = mf.GetName()
+ newType = mf.GetType()
+ newNameWithoutSuffix = ""
+ )
+ switch {
+ case strings.HasSuffix(newName, "_count"):
+ newNameWithoutSuffix = newName[:len(newName)-6]
+ case strings.HasSuffix(newName, "_sum"):
+ newNameWithoutSuffix = newName[:len(newName)-4]
+ case strings.HasSuffix(newName, "_bucket"):
+ newNameWithoutSuffix = newName[:len(newName)-7]
+ }
+ if newNameWithoutSuffix != "" {
+ if existingMF, ok := mfs[newNameWithoutSuffix]; ok {
+ switch existingMF.GetType() {
+ case dto.MetricType_SUMMARY:
+ if !strings.HasSuffix(newName, "_bucket") {
+ return fmt.Errorf(
+ "collected metric named %q collides with previously collected summary named %q",
+ newName, newNameWithoutSuffix,
+ )
+ }
+ case dto.MetricType_HISTOGRAM:
+ return fmt.Errorf(
+ "collected metric named %q collides with previously collected histogram named %q",
+ newName, newNameWithoutSuffix,
+ )
+ }
+ }
}
- names := make([]string, 0, len(metricFamiliesByName))
- for name, mf := range metricFamiliesByName {
- if len(mf.Metric) > 0 {
- names = append(names, name)
+ if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM {
+ if _, ok := mfs[newName+"_count"]; ok {
+ return fmt.Errorf(
+ "collected histogram or summary named %q collides with previously collected metric named %q",
+ newName, newName+"_count",
+ )
+ }
+ if _, ok := mfs[newName+"_sum"]; ok {
+ return fmt.Errorf(
+ "collected histogram or summary named %q collides with previously collected metric named %q",
+ newName, newName+"_sum",
+ )
}
}
- sort.Strings(names)
- result := make([]*dto.MetricFamily, 0, len(names))
- for _, name := range names {
- result = append(result, metricFamiliesByName[name])
+ if newType == dto.MetricType_HISTOGRAM {
+ if _, ok := mfs[newName+"_bucket"]; ok {
+ return fmt.Errorf(
+ "collected histogram named %q collides with previously collected metric named %q",
+ newName, newName+"_bucket",
+ )
+ }
}
- return result
+ return nil
}
// checkMetricConsistency checks if the provided Metric is consistent with the
-// provided MetricFamily. It also hashed the Metric labels and the MetricFamily
+// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
// name. If the resulting hash is already in the provided metricHashes, an error
-// is returned. If not, it is added to metricHashes. The provided dimHashes maps
-// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes
-// doesn't yet contain a hash for the provided MetricFamily, it is
-// added. Otherwise, an error is returned if the existing dimHashes in not equal
-// the calculated dimHash.
+// is returned. If not, it is added to metricHashes.
func checkMetricConsistency(
metricFamily *dto.MetricFamily,
dtoMetric *dto.Metric,
metricHashes map[uint64]struct{},
- dimHashes map[string]uint64,
) error {
+ name := metricFamily.GetName()
+
// Type consistency with metric family.
if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
@@ -720,48 +841,67 @@ func checkMetricConsistency(
metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
return fmt.Errorf(
- "collected metric %s %s is not a %s",
- metricFamily.GetName(), dtoMetric, metricFamily.GetType(),
+ "collected metric %q { %s} is not a %s",
+ name, dtoMetric, metricFamily.GetType(),
)
}
+ previousLabelName := ""
for _, labelPair := range dtoMetric.GetLabel() {
- if !utf8.ValidString(*labelPair.Value) {
- return fmt.Errorf("collected metric's label %s is not utf8: %#v", *labelPair.Name, *labelPair.Value)
+ labelName := labelPair.GetName()
+ if labelName == previousLabelName {
+ return fmt.Errorf(
+ "collected metric %q { %s} has two or more labels with the same name: %s",
+ name, dtoMetric, labelName,
+ )
+ }
+ if !checkLabelName(labelName) {
+ return fmt.Errorf(
+ "collected metric %q { %s} has a label with an invalid name: %s",
+ name, dtoMetric, labelName,
+ )
+ }
+ if dtoMetric.Summary != nil && labelName == quantileLabel {
+ return fmt.Errorf(
+ "collected metric %q { %s} must not have an explicit %q label",
+ name, dtoMetric, quantileLabel,
+ )
+ }
+ if !utf8.ValidString(labelPair.GetValue()) {
+ return fmt.Errorf(
+ "collected metric %q { %s} has a label named %q whose value is not utf8: %#v",
+ name, dtoMetric, labelName, labelPair.GetValue())
}
+ previousLabelName = labelName
}
- // Is the metric unique (i.e. no other metric with the same name and the same label values)?
- h := hashNew()
- h = hashAdd(h, metricFamily.GetName())
- h = hashAddByte(h, separatorByte)
- dh := hashNew()
+ // Is the metric unique (i.e. no other metric with the same name and the same labels)?
+ h := xxhash.New()
+ h.WriteString(name)
+ h.Write(separatorByteSlice)
// Make sure label pairs are sorted. We depend on it for the consistency
// check.
- sort.Sort(LabelPairSorter(dtoMetric.Label))
+ if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) {
+ // We cannot sort dtoMetric.Label in place as it is immutable by contract.
+ copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label))
+ copy(copiedLabels, dtoMetric.Label)
+ sort.Sort(labelPairSorter(copiedLabels))
+ dtoMetric.Label = copiedLabels
+ }
for _, lp := range dtoMetric.Label {
- h = hashAdd(h, lp.GetValue())
- h = hashAddByte(h, separatorByte)
- dh = hashAdd(dh, lp.GetName())
- dh = hashAddByte(dh, separatorByte)
+ h.WriteString(lp.GetName())
+ h.Write(separatorByteSlice)
+ h.WriteString(lp.GetValue())
+ h.Write(separatorByteSlice)
}
- if _, exists := metricHashes[h]; exists {
+ hSum := h.Sum64()
+ if _, exists := metricHashes[hSum]; exists {
return fmt.Errorf(
- "collected metric %s %s was collected before with the same name and label values",
- metricFamily.GetName(), dtoMetric,
+ "collected metric %q { %s} was collected before with the same name and label values",
+ name, dtoMetric,
)
}
- if dimHash, ok := dimHashes[metricFamily.GetName()]; ok {
- if dimHash != dh {
- return fmt.Errorf(
- "collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family",
- metricFamily.GetName(), dtoMetric,
- )
- }
- } else {
- dimHashes[metricFamily.GetName()] = dh
- }
- metricHashes[h] = struct{}{}
+ metricHashes[hSum] = struct{}{}
return nil
}
@@ -779,8 +919,8 @@ func checkDescConsistency(
}
// Is the desc consistent with the content of the metric?
- lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label))
- lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...)
+ lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label))
+ copy(lpsFromDesc, desc.constLabelPairs)
for _, l := range desc.variableLabels {
lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
Name: proto.String(l),
@@ -792,7 +932,7 @@ func checkDescConsistency(
metricFamily.GetName(), dtoMetric, desc,
)
}
- sort.Sort(LabelPairSorter(lpsFromDesc))
+ sort.Sort(labelPairSorter(lpsFromDesc))
for i, lpFromDesc := range lpsFromDesc {
lpFromMetric := dtoMetric.Label[i]
if lpFromDesc.GetName() != lpFromMetric.GetName() ||
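
The newly added WriteToTextfile is easiest to see in the context of a short-lived batch job. A minimal sketch, assuming a hypothetical job metric and a node_exporter textfile directory at /var/lib/node_exporter (both illustrative, not part of the diff):

package main

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()
	lastRun := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "batch_job_last_success_timestamp_seconds",
		Help: "Unix time of the last successful batch run.",
	})
	reg.MustRegister(lastRun)

	// ... run the batch job ...
	lastRun.SetToCurrentTime()

	// WriteToTextfile gathers from reg, writes the text format to a temp file,
	// and renames it into place; the node exporter only picks up "*.prom" files.
	if err := prometheus.WriteToTextfile("/var/lib/node_exporter/batch_job.prom", reg); err != nil {
		log.Fatal(err)
	}
}
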
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
index f7dc85b96..ae42e761a 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
@@ -16,8 +16,10 @@ package prometheus
import (
"fmt"
"math"
+ "runtime"
"sort"
"sync"
+ "sync/atomic"
"time"
"github.com/beorn7/perks/quantile"
@@ -37,7 +39,7 @@ const quantileLabel = "quantile"
// A typical use-case is the observation of request latencies. By default, a
// Summary provides the median, the 90th and the 99th percentile of the latency
// as rank estimations. However, the default behavior will change in the
-// upcoming v0.10 of the library. There will be no rank estiamtions at all by
+// upcoming v1.0.0 of the library. There will be no rank estimations at all by
// default. For a sane transition, it is recommended to set the desired rank
// estimations explicitly.
//
@@ -56,16 +58,8 @@ type Summary interface {
Observe(float64)
}
-// DefObjectives are the default Summary quantile values.
-//
-// Deprecated: DefObjectives will not be used as the default objectives in
-// v0.10 of the library. The default Summary will have no quantiles then.
-var (
- DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
-
- errQuantileLabelNotAllowed = fmt.Errorf(
- "%q is not allowed as label name in summaries", quantileLabel,
- )
+var errQuantileLabelNotAllowed = fmt.Errorf(
+ "%q is not allowed as label name in summaries", quantileLabel,
)
// Default values for SummaryOpts.
@@ -81,10 +75,10 @@ const (
)
// SummaryOpts bundles the options for creating a Summary metric. It is
-// mandatory to set Name and Help to a non-empty string. While all other fields
-// are optional and can safely be left at their zero value, it is recommended to
-// explicitly set the Objectives field to the desired value as the default value
-// will change in the upcoming v0.10 of the library.
+// mandatory to set Name to a non-empty string. While all other fields are
+// optional and can safely be left at their zero value, it is recommended to set
+// a help string and to explicitly set the Objectives field to the desired value
+// as the default value will change in the upcoming v1.0.0 of the library.
type SummaryOpts struct {
// Namespace, Subsystem, and Name are components of the fully-qualified
// name of the Summary (created by joining these components with
@@ -95,7 +89,7 @@ type SummaryOpts struct {
Subsystem string
Name string
- // Help provides information about this Summary. Mandatory!
+ // Help provides information about this Summary.
//
// Metrics with the same fully-qualified name must have the same Help
// string.
@@ -105,6 +99,11 @@ type SummaryOpts struct {
// with the same fully-qualified name must have the same label names in
// their ConstLabels.
//
+ // Due to the way a Summary is represented in the Prometheus text format
+ // and how it is handled by the Prometheus server internally, “quantile”
+ // is an illegal label name. Construction of a Summary or SummaryVec
+ // will panic if this label name is used in ConstLabels.
+ //
// ConstLabels are only used rarely. In particular, do not use them to
// attach the same labels to all your metrics. Those use cases are
// better covered by target labels set by the scraping Prometheus
@@ -116,13 +115,8 @@ type SummaryOpts struct {
// Objectives defines the quantile rank estimates with their respective
// absolute error. If Objectives[q] = e, then the value reported for q
// will be the φ-quantile value for some φ between q-e and q+e. The
- // default value is DefObjectives. It is used if Objectives is left at
- // its zero value (i.e. nil). To create a Summary without Objectives,
- // set it to an empty map (i.e. map[float64]float64{}).
- //
- // Deprecated: Note that the current value of DefObjectives is
- // deprecated. It will be replaced by an empty map in v0.10 of the
- // library. Please explicitly set Objectives to the desired value.
+ // default value is an empty map, resulting in a summary without
+ // quantiles.
Objectives map[float64]float64
// MaxAge defines the duration for which an observation stays relevant
@@ -146,7 +140,7 @@ type SummaryOpts struct {
BufCap uint32
}
-// Great fuck-up with the sliding-window decay algorithm... The Merge method of
+// Problem with the sliding-window decay algorithm... The Merge method of
// perk/quantile is actually not working as advertised - and it might be
// unfixable, as the underlying algorithm is apparently not capable of merging
// summaries in the first place. To avoid using Merge, we are currently adding
@@ -176,7 +170,7 @@ func NewSummary(opts SummaryOpts) Summary {
func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
if len(desc.variableLabels) != len(labelValues) {
- panic(errInconsistentCardinality)
+ panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
}
for _, n := range desc.variableLabels {
@@ -191,7 +185,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
}
if opts.Objectives == nil {
- opts.Objectives = DefObjectives
+ opts.Objectives = map[float64]float64{}
}
if opts.MaxAge < 0 {
@@ -209,6 +203,17 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
opts.BufCap = DefBufCap
}
+ if len(opts.Objectives) == 0 {
+ // Use the lock-free implementation of a Summary without objectives.
+ s := &noObjectivesSummary{
+ desc: desc,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ counts: [2]*summaryCounts{{}, {}},
+ }
+ s.init(s) // Init self-collection.
+ return s
+ }
+
s := &summary{
desc: desc,
@@ -377,6 +382,116 @@ func (s *summary) swapBufs(now time.Time) {
}
}
+type summaryCounts struct {
+ // sumBits contains the bits of the float64 representing the sum of all
+ // observations. sumBits and count have to go first in the struct to
+ // guarantee alignment for atomic operations.
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ sumBits uint64
+ count uint64
+}
+
+type noObjectivesSummary struct {
+ // countAndHotIdx enables lock-free writes with use of atomic updates.
+ // The most significant bit is the hot index [0 or 1] of the count field
+ // below. Observe calls update the hot one. All remaining bits count the
+ // number of Observe calls. Observe starts by incrementing this counter,
+ // and finish by incrementing the count field in the respective
+ // summaryCounts, as a marker for completion.
+ //
+ // Calls of the Write method (which are non-mutating reads from the
+ // perspective of the summary) swap the hot and cold counts under the
+ // writeMtx lock. A cooldown is awaited (while locked) by comparing the
+ // number of observations with the initiation count. Once they match, the
+ // last observation on the now-cold counts has completed. All cold fields
+ // must then be merged into the new hot counts before writeMtx is released.
+
+ // Fields with atomic access first! See alignment constraint:
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ countAndHotIdx uint64
+
+ selfCollector
+ desc *Desc
+ writeMtx sync.Mutex // Only used in the Write method.
+
+ // Two counts, one is "hot" for lock-free observations, the other is
+ // "cold" for writing out a dto.Metric. It has to be an array of
+ // pointers to guarantee 64bit alignment of the summaryCounts, see
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
+ counts [2]*summaryCounts
+
+ labelPairs []*dto.LabelPair
+}
+
+func (s *noObjectivesSummary) Desc() *Desc {
+ return s.desc
+}
+
+func (s *noObjectivesSummary) Observe(v float64) {
+ // We increment s.countAndHotIdx so that the counter in the lower
+ // 63 bits gets incremented. At the same time, we get the new value
+ // back, which we can use to find the currently-hot counts.
+ n := atomic.AddUint64(&s.countAndHotIdx, 1)
+ hotCounts := s.counts[n>>63]
+
+ for {
+ oldBits := atomic.LoadUint64(&hotCounts.sumBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+ if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
+ break
+ }
+ }
+ // Increment count last as we take it as a signal that the observation
+ // is complete.
+ atomic.AddUint64(&hotCounts.count, 1)
+}
+
+func (s *noObjectivesSummary) Write(out *dto.Metric) error {
+ // For simplicity, we protect this whole method by a mutex. It is not in
+ // the hot path, i.e. Observe is called much more often than Write. The
+ // complication of making Write lock-free isn't worth it, if possible at
+ // all.
+ s.writeMtx.Lock()
+ defer s.writeMtx.Unlock()
+
+ // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
+ // without touching the count bits. See the struct comments for a full
+ // description of the algorithm.
+ n := atomic.AddUint64(&s.countAndHotIdx, 1<<63)
+ // count is contained unchanged in the lower 63 bits.
+ count := n & ((1 << 63) - 1)
+ // The most significant bit tells us which of the two counts is hot. The
+ // complement is thus the cold one.
+ hotCounts := s.counts[n>>63]
+ coldCounts := s.counts[(^n)>>63]
+
+ // Await cooldown.
+ for count != atomic.LoadUint64(&coldCounts.count) {
+ runtime.Gosched() // Let observations get work done.
+ }
+
+ sum := &dto.Summary{
+ SampleCount: proto.Uint64(count),
+ SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+ }
+
+ out.Summary = sum
+ out.Label = s.labelPairs
+
+ // Finally add all the cold counts to the new hot counts and reset the cold counts.
+ atomic.AddUint64(&hotCounts.count, count)
+ atomic.StoreUint64(&coldCounts.count, 0)
+ for {
+ oldBits := atomic.LoadUint64(&hotCounts.sumBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum())
+ if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
+ atomic.StoreUint64(&coldCounts.sumBits, 0)
+ break
+ }
+ }
+ return nil
+}
+
type quantSort []*dto.Quantile
func (s quantSort) Len() int {
@@ -402,7 +517,16 @@ type SummaryVec struct {
// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
// partitioned by the given label names.
+//
+// Due to the way a Summary is represented in the Prometheus text format and how
+// it is handled by the Prometheus server internally, “quantile” is an illegal
+// label name. NewSummaryVec will panic if this label name is used.
func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
+ for _, ln := range labelNames {
+ if ln == quantileLabel {
+ panic(errQuantileLabelNotAllowed)
+ }
+ }
desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
@@ -572,7 +696,7 @@ func (s *constSummary) Write(out *dto.Metric) error {
// map[float64]float64{0.5: 0.23, 0.99: 0.56}
//
// NewConstSummary returns an error if the length of labelValues is not
-// consistent with the variable labels in Desc.
+// consistent with the variable labels in Desc or if Desc is invalid.
func NewConstSummary(
desc *Desc,
count uint64,
@@ -580,6 +704,9 @@ func NewConstSummary(
quantiles map[float64]float64,
labelValues ...string,
) (Metric, error) {
+ if desc.err != nil {
+ return nil, desc.err
+ }
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
return nil, err
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
index b8fc5f18c..8d5f10523 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/timer.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
@@ -39,13 +39,16 @@ func NewTimer(o Observer) *Timer {
// ObserveDuration records the duration passed since the Timer was created with
// NewTimer. It calls the Observe method of the Observer provided during
-// construction with the duration in seconds as an argument. ObserveDuration is
-// usually called with a defer statement.
+// construction with the duration in seconds as an argument. The observed
+// duration is also returned. ObserveDuration is usually called with a defer
+// statement.
//
// Note that this method is only guaranteed to never observe negative durations
// if used with Go1.9+.
-func (t *Timer) ObserveDuration() {
+func (t *Timer) ObserveDuration() time.Duration {
+ d := time.Since(t.begin)
if t.observer != nil {
- t.observer.Observe(time.Since(t.begin).Seconds())
+ t.observer.Observe(d.Seconds())
}
+ return d
}
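
Because ObserveDuration now also returns the elapsed time, the same measurement can feed both the Observer and, say, a log line. A small sketch under that assumption; the helper and metric names are illustrative.

package main

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"
)

// timedSection records the duration of work() to obs and also logs it,
// reusing the duration returned by the changed ObserveDuration.
func timedSection(obs prometheus.Observer, work func()) {
	timer := prometheus.NewTimer(obs)
	defer func() {
		d := timer.ObserveDuration() // observes d.Seconds() and returns d
		log.Printf("section took %v", d)
	}()
	work()
}

func main() {
	hist := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name: "section_duration_seconds",
		Help: "Duration of the timed section.",
	})
	prometheus.MustRegister(hist)
	timedSection(hist, func() { /* ... the actual work ... */ })
}
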
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go
index 543b57c27..eb248f108 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/value.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go
@@ -17,9 +17,9 @@ import (
"fmt"
"sort"
- dto "github.com/prometheus/client_model/go"
-
"github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
)
// ValueType is an enumeration of metric types that represent a simple value.
@@ -77,8 +77,12 @@ func (v *valueFunc) Write(out *dto.Metric) error {
// operations. However, when implementing custom Collectors, it is useful as a
// throw-away metric that is generated on the fly to send it to Prometheus in
// the Collect method. NewConstMetric returns an error if the length of
-// labelValues is not consistent with the variable labels in Desc.
+// labelValues is not consistent with the variable labels in Desc or if Desc is
+// invalid.
func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
+ if desc.err != nil {
+ return nil, desc.err
+ }
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
return nil, err
}
@@ -152,9 +156,7 @@ func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
Value: proto.String(labelValues[i]),
})
}
- for _, lp := range desc.constLabelPairs {
- labelPairs = append(labelPairs, lp)
- }
- sort.Sort(LabelPairSorter(labelPairs))
+ labelPairs = append(labelPairs, desc.constLabelPairs...)
+ sort.Sort(labelPairSorter(labelPairs))
return labelPairs
}
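
The new desc.err check in NewConstMetric matters mainly for custom Collectors that mint throw-away metrics per scrape, as its documentation suggests. A sketch of that pattern; the queueCollector type and its length source are hypothetical.

package main

import "github.com/prometheus/client_golang/prometheus"

// queueCollector exposes the current queue length as a gauge built on the fly.
type queueCollector struct {
	lenDesc *prometheus.Desc
	length  func() int // hypothetical source of the current queue length
}

func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.lenDesc
}

func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	// An invalid Desc (e.g. produced by a bad wrapping registerer) now
	// surfaces here as an error instead of an inconsistent metric.
	m, err := prometheus.NewConstMetric(c.lenDesc, prometheus.GaugeValue, float64(c.length()))
	if err != nil {
		ch <- prometheus.NewInvalidMetric(c.lenDesc, err)
		return
	}
	ch <- m
}

func main() {
	c := &queueCollector{
		lenDesc: prometheus.NewDesc("work_queue_length", "Current queue length.", nil, nil),
		length:  func() int { return 0 },
	}
	prometheus.MustRegister(c)
}
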
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
index cea158249..19df3fe6b 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
@@ -24,7 +24,7 @@ import (
// their label values. metricVec is not used directly (and therefore
// unexported). It is used as a building block for implementations of vectors of
// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec.
-// It also handles label currying. It uses basicMetricVec internally.
+// It also handles label currying.
type metricVec struct {
*metricMap
@@ -277,6 +277,9 @@ func (m *metricMap) deleteByHashWithLabelValues(
func (m *metricMap) deleteByHashWithLabels(
h uint64, labels Labels, curry []curriedLabelValue,
) bool {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
metrics, ok := m.metrics[h]
if !ok {
return false
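
The locking added to deleteByHashWithLabels sits behind the public Delete method on the metric vectors. For reference, a short sketch of that deletion path with an assumed per-tenant counter:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	requests := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "tenant_requests_total",
		Help: "Requests handled, partitioned by tenant.",
	}, []string{"tenant"})
	prometheus.MustRegister(requests)

	requests.WithLabelValues("acme").Inc()

	// Dropping a tenant's child series goes through deleteByHashWithLabels,
	// which now takes the metricMap mutex.
	_ = requests.Delete(prometheus.Labels{"tenant": "acme"})
}
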
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
new file mode 100644
index 000000000..e303eef6d
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
@@ -0,0 +1,200 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "sort"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// WrapRegistererWith returns a Registerer wrapping the provided
+// Registerer. Collectors registered with the returned Registerer will be
+// registered with the wrapped Registerer in a modified way. The modified
+// Collector adds the provided Labels to all Metrics it collects (as
+// ConstLabels). The Metrics collected by the unmodified Collector must not
+// duplicate any of those labels.
+//
+// WrapRegistererWith provides a way to add fixed labels to a subset of
+// Collectors. It should not be used to add fixed labels to all metrics exposed.
+//
+// Conflicts between Collectors registered through the original Registerer with
+// Collectors registered through the wrapping Registerer will still be
+// detected. Any AlreadyRegisteredError returned by the Register method of
+// either Registerer will contain the ExistingCollector in the form it was
+// provided to the respective registry.
+//
+// The Collector example demonstrates a use of WrapRegistererWith.
+func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
+ return &wrappingRegisterer{
+ wrappedRegisterer: reg,
+ labels: labels,
+ }
+}
+
+// WrapRegistererWithPrefix returns a Registerer wrapping the provided
+// Registerer. Collectors registered with the returned Registerer will be
+// registered with the wrapped Registerer in a modified way. The modified
+// Collector adds the provided prefix to the name of all Metrics it collects.
+//
+// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of
+// a sub-system. To make this work, register metrics of the sub-system with the
+// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful
+// to use the same prefix for all metrics exposed. In particular, do not prefix
+// metric names that are standardized across applications, as that would break
+// horizontal monitoring, for example the metrics provided by the Go collector
+// (see NewGoCollector) and the process collector (see NewProcessCollector). (In
+// fact, those metrics are already prefixed with “go_” or “process_”,
+// respectively.)
+//
+// Conflicts between Collectors registered through the original Registerer with
+// Collectors registered through the wrapping Registerer will still be
+// detected. Any AlreadyRegisteredError returned by the Register method of
+// either Registerer will contain the ExistingCollector in the form it was
+// provided to the respective registry.
+func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer {
+ return &wrappingRegisterer{
+ wrappedRegisterer: reg,
+ prefix: prefix,
+ }
+}
+
+type wrappingRegisterer struct {
+ wrappedRegisterer Registerer
+ prefix string
+ labels Labels
+}
+
+func (r *wrappingRegisterer) Register(c Collector) error {
+ return r.wrappedRegisterer.Register(&wrappingCollector{
+ wrappedCollector: c,
+ prefix: r.prefix,
+ labels: r.labels,
+ })
+}
+
+func (r *wrappingRegisterer) MustRegister(cs ...Collector) {
+ for _, c := range cs {
+ if err := r.Register(c); err != nil {
+ panic(err)
+ }
+ }
+}
+
+func (r *wrappingRegisterer) Unregister(c Collector) bool {
+ return r.wrappedRegisterer.Unregister(&wrappingCollector{
+ wrappedCollector: c,
+ prefix: r.prefix,
+ labels: r.labels,
+ })
+}
+
+type wrappingCollector struct {
+ wrappedCollector Collector
+ prefix string
+ labels Labels
+}
+
+func (c *wrappingCollector) Collect(ch chan<- Metric) {
+ wrappedCh := make(chan Metric)
+ go func() {
+ c.wrappedCollector.Collect(wrappedCh)
+ close(wrappedCh)
+ }()
+ for m := range wrappedCh {
+ ch <- &wrappingMetric{
+ wrappedMetric: m,
+ prefix: c.prefix,
+ labels: c.labels,
+ }
+ }
+}
+
+func (c *wrappingCollector) Describe(ch chan<- *Desc) {
+ wrappedCh := make(chan *Desc)
+ go func() {
+ c.wrappedCollector.Describe(wrappedCh)
+ close(wrappedCh)
+ }()
+ for desc := range wrappedCh {
+ ch <- wrapDesc(desc, c.prefix, c.labels)
+ }
+}
+
+func (c *wrappingCollector) unwrapRecursively() Collector {
+ switch wc := c.wrappedCollector.(type) {
+ case *wrappingCollector:
+ return wc.unwrapRecursively()
+ default:
+ return wc
+ }
+}
+
+type wrappingMetric struct {
+ wrappedMetric Metric
+ prefix string
+ labels Labels
+}
+
+func (m *wrappingMetric) Desc() *Desc {
+ return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels)
+}
+
+func (m *wrappingMetric) Write(out *dto.Metric) error {
+ if err := m.wrappedMetric.Write(out); err != nil {
+ return err
+ }
+ if len(m.labels) == 0 {
+ // No wrapping labels.
+ return nil
+ }
+ for ln, lv := range m.labels {
+ out.Label = append(out.Label, &dto.LabelPair{
+ Name: proto.String(ln),
+ Value: proto.String(lv),
+ })
+ }
+ sort.Sort(labelPairSorter(out.Label))
+ return nil
+}
+
+func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc {
+ constLabels := Labels{}
+ for _, lp := range desc.constLabelPairs {
+ constLabels[*lp.Name] = *lp.Value
+ }
+ for ln, lv := range labels {
+ if _, alreadyUsed := constLabels[ln]; alreadyUsed {
+ return &Desc{
+ fqName: desc.fqName,
+ help: desc.help,
+ variableLabels: desc.variableLabels,
+ constLabelPairs: desc.constLabelPairs,
+ err: fmt.Errorf("attempted wrapping with already existing label name %q", ln),
+ }
+ }
+ constLabels[ln] = lv
+ }
+ // NewDesc will do remaining validations.
+ newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
+ // Propagate errors if there were any. This will override any error
+ // created by NewDesc above, i.e. earlier errors get precedence.
+ if desc.err != nil {
+ newDesc.err = desc.err
+ }
+ return newDesc
+}
diff --git a/vendor/github.com/prometheus/client_model/README.md b/vendor/github.com/prometheus/client_model/README.md
index a710042db..e10097582 100644
--- a/vendor/github.com/prometheus/client_model/README.md
+++ b/vendor/github.com/prometheus/client_model/README.md
@@ -1,26 +1,34 @@
-# Background
-Under most circumstances, manually downloading this repository should never
-be required.
+# Deprecation note
-# Prerequisites
-# Base
-* [Google Protocol Buffers](https://developers.google.com/protocol-buffers)
+This repository used to contain the [protocol
+buffer](https://developers.google.com/protocol-buffers) code that defined both
+the data model and the exposition format of Prometheus metrics.
-## Java
-* [Apache Maven](http://maven.apache.org)
-* [Prometheus Maven Repository](https://github.com/prometheus/io.prometheus-maven-repository) checked out into ../io.prometheus-maven-repository
+Starting with v2.0.0, the [Prometheus
+server](https://github.com/prometheus/prometheus) does not ingest the
+protobuf-based exposition format anymore. Currently, all but one of the
+[official instrumentation
+libraries](https://prometheus.io/docs/instrumenting/clientlibs/) do not expose
+the protobuf-based exposition format. The [Go instrumentation
+library](https://github.com/prometheus/client_golang), however, has been built
+around the protobuf-based data model. As a byproduct thereof, it is still able
+to expose the protobuf-based exposition format. The Go instrumentation library
+is the only remaining repository within the [Prometheus GitHub
+org](https://github.com/prometheus) directly using the prometheus/client_model
+repository.
-## Go
-* [Go](http://golang.org)
-* [goprotobuf](https://code.google.com/p/goprotobuf)
+Therefore, formerly existing support for languages other than Go (namely C++,
+Java, Python, Ruby) has been removed from this repository. If you are a 3rd
+party user of those languages, you can go back to [commit
+14fe0d1](https://github.com/prometheus/client_model/commit/14fe0d1b01d4d5fc031dd4bec1823bd3ebbe8016)
+to keep using the old code, or you can consume
+[`metrics.proto`](https://github.com/prometheus/client_model/blob/master/metrics.proto)
+directly with your own protobuf tooling. Note, however, that changes of
+`metrics.proto` after [commit
+14fe0d1](https://github.com/prometheus/client_model/commit/14fe0d1b01d4d5fc031dd4bec1823bd3ebbe8016)
+are solely informed by requirements of the Go instrumentation library and will
+not take into account any requirements of other languages or stability concerns
+for the protobuf-based exposition format.
-## Ruby
-* [Ruby](https://www.ruby-lang.org)
-* [bundler](https://rubygems.org/gems/bundler)
-
-# Building
- $ make
-
-# Getting Started
- * The Go source code is periodically indexed: [Go Protocol Buffer Model](http://godoc.org/github.com/prometheus/client_model/go).
- * All of the core developers are accessible via the [Prometheus Developers Mailinglist](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
+Check out the [OpenMetrics project](https://openmetrics.io/) for the future of
+the data model and exposition format used by Prometheus and others.
diff --git a/vendor/github.com/prometheus/client_model/go.mod b/vendor/github.com/prometheus/client_model/go.mod
new file mode 100644
index 000000000..e7e0a86cc
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/go.mod
@@ -0,0 +1,8 @@
+module github.com/prometheus/client_model
+
+go 1.9
+
+require (
+ github.com/golang/protobuf v1.2.0
+ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f // indirect
+)
diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
index b065f8683..9805432c2 100644
--- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go
+++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
@@ -1,34 +1,23 @@
-// Code generated by protoc-gen-go.
+// Code generated by protoc-gen-go. DO NOT EDIT.
// source: metrics.proto
-// DO NOT EDIT!
-
-/*
-Package io_prometheus_client is a generated protocol buffer package.
-
-It is generated from these files:
- metrics.proto
-
-It has these top-level messages:
- LabelPair
- Gauge
- Counter
- Quantile
- Summary
- Untyped
- Histogram
- Bucket
- Metric
- MetricFamily
-*/
-package io_prometheus_client
+
+package io_prometheus_client // import "github.com/prometheus/client_model/go"
import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
+var _ = fmt.Errorf
var _ = math.Inf
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
type MetricType int32
const (
@@ -70,16 +59,41 @@ func (x *MetricType) UnmarshalJSON(data []byte) error {
*x = MetricType(value)
return nil
}
+func (MetricType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0}
+}
type LabelPair struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
func (m *LabelPair) Reset() { *m = LabelPair{} }
func (m *LabelPair) String() string { return proto.CompactTextString(m) }
func (*LabelPair) ProtoMessage() {}
+func (*LabelPair) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0}
+}
+func (m *LabelPair) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LabelPair.Unmarshal(m, b)
+}
+func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic)
+}
+func (dst *LabelPair) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LabelPair.Merge(dst, src)
+}
+func (m *LabelPair) XXX_Size() int {
+ return xxx_messageInfo_LabelPair.Size(m)
+}
+func (m *LabelPair) XXX_DiscardUnknown() {
+ xxx_messageInfo_LabelPair.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LabelPair proto.InternalMessageInfo
func (m *LabelPair) GetName() string {
if m != nil && m.Name != nil {
@@ -96,13 +110,35 @@ func (m *LabelPair) GetValue() string {
}
type Gauge struct {
- Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
func (m *Gauge) Reset() { *m = Gauge{} }
func (m *Gauge) String() string { return proto.CompactTextString(m) }
func (*Gauge) ProtoMessage() {}
+func (*Gauge) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{1}
+}
+func (m *Gauge) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Gauge.Unmarshal(m, b)
+}
+func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Gauge.Marshal(b, m, deterministic)
+}
+func (dst *Gauge) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Gauge.Merge(dst, src)
+}
+func (m *Gauge) XXX_Size() int {
+ return xxx_messageInfo_Gauge.Size(m)
+}
+func (m *Gauge) XXX_DiscardUnknown() {
+ xxx_messageInfo_Gauge.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Gauge proto.InternalMessageInfo
func (m *Gauge) GetValue() float64 {
if m != nil && m.Value != nil {
@@ -112,13 +148,35 @@ func (m *Gauge) GetValue() float64 {
}
type Counter struct {
- Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
func (m *Counter) Reset() { *m = Counter{} }
func (m *Counter) String() string { return proto.CompactTextString(m) }
func (*Counter) ProtoMessage() {}
+func (*Counter) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{2}
+}
+func (m *Counter) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Counter.Unmarshal(m, b)
+}
+func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Counter.Marshal(b, m, deterministic)
+}
+func (dst *Counter) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Counter.Merge(dst, src)
+}
+func (m *Counter) XXX_Size() int {
+ return xxx_messageInfo_Counter.Size(m)
+}
+func (m *Counter) XXX_DiscardUnknown() {
+ xxx_messageInfo_Counter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Counter proto.InternalMessageInfo
func (m *Counter) GetValue() float64 {
if m != nil && m.Value != nil {
@@ -128,14 +186,36 @@ func (m *Counter) GetValue() float64 {
}
type Quantile struct {
- Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
- Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
+ Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
func (m *Quantile) Reset() { *m = Quantile{} }
func (m *Quantile) String() string { return proto.CompactTextString(m) }
func (*Quantile) ProtoMessage() {}
+func (*Quantile) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{3}
+}
+func (m *Quantile) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Quantile.Unmarshal(m, b)
+}
+func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Quantile.Marshal(b, m, deterministic)
+}
+func (dst *Quantile) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Quantile.Merge(dst, src)
+}
+func (m *Quantile) XXX_Size() int {
+ return xxx_messageInfo_Quantile.Size(m)
+}
+func (m *Quantile) XXX_DiscardUnknown() {
+ xxx_messageInfo_Quantile.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Quantile proto.InternalMessageInfo
func (m *Quantile) GetQuantile() float64 {
if m != nil && m.Quantile != nil {
@@ -152,15 +232,37 @@ func (m *Quantile) GetValue() float64 {
}
type Summary struct {
- SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
- SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
- Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
+ Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
func (m *Summary) Reset() { *m = Summary{} }
func (m *Summary) String() string { return proto.CompactTextString(m) }
func (*Summary) ProtoMessage() {}
+func (*Summary) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{4}
+}
+func (m *Summary) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Summary.Unmarshal(m, b)
+}
+func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Summary.Marshal(b, m, deterministic)
+}
+func (dst *Summary) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Summary.Merge(dst, src)
+}
+func (m *Summary) XXX_Size() int {
+ return xxx_messageInfo_Summary.Size(m)
+}
+func (m *Summary) XXX_DiscardUnknown() {
+ xxx_messageInfo_Summary.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Summary proto.InternalMessageInfo
func (m *Summary) GetSampleCount() uint64 {
if m != nil && m.SampleCount != nil {
@@ -184,13 +286,35 @@ func (m *Summary) GetQuantile() []*Quantile {
}
type Untyped struct {
- Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
func (m *Untyped) Reset() { *m = Untyped{} }
func (m *Untyped) String() string { return proto.CompactTextString(m) }
func (*Untyped) ProtoMessage() {}
+func (*Untyped) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{5}
+}
+func (m *Untyped) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Untyped.Unmarshal(m, b)
+}
+func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Untyped.Marshal(b, m, deterministic)
+}
+func (dst *Untyped) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Untyped.Merge(dst, src)
+}
+func (m *Untyped) XXX_Size() int {
+ return xxx_messageInfo_Untyped.Size(m)
+}
+func (m *Untyped) XXX_DiscardUnknown() {
+ xxx_messageInfo_Untyped.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Untyped proto.InternalMessageInfo
func (m *Untyped) GetValue() float64 {
if m != nil && m.Value != nil {
@@ -200,15 +324,37 @@ func (m *Untyped) GetValue() float64 {
}
type Histogram struct {
- SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
- SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
- Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
+ Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
func (m *Histogram) Reset() { *m = Histogram{} }
func (m *Histogram) String() string { return proto.CompactTextString(m) }
func (*Histogram) ProtoMessage() {}
+func (*Histogram) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{6}
+}
+func (m *Histogram) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Histogram.Unmarshal(m, b)
+}
+func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
+}
+func (dst *Histogram) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Histogram.Merge(dst, src)
+}
+func (m *Histogram) XXX_Size() int {
+ return xxx_messageInfo_Histogram.Size(m)
+}
+func (m *Histogram) XXX_DiscardUnknown() {
+ xxx_messageInfo_Histogram.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Histogram proto.InternalMessageInfo
func (m *Histogram) GetSampleCount() uint64 {
if m != nil && m.SampleCount != nil {
@@ -232,14 +378,36 @@ func (m *Histogram) GetBucket() []*Bucket {
}
type Bucket struct {
- CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"`
- UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"`
+ UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
func (m *Bucket) Reset() { *m = Bucket{} }
func (m *Bucket) String() string { return proto.CompactTextString(m) }
func (*Bucket) ProtoMessage() {}
+func (*Bucket) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{7}
+}
+func (m *Bucket) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Bucket.Unmarshal(m, b)
+}
+func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Bucket.Marshal(b, m, deterministic)
+}
+func (dst *Bucket) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Bucket.Merge(dst, src)
+}
+func (m *Bucket) XXX_Size() int {
+ return xxx_messageInfo_Bucket.Size(m)
+}
+func (m *Bucket) XXX_DiscardUnknown() {
+ xxx_messageInfo_Bucket.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Bucket proto.InternalMessageInfo
func (m *Bucket) GetCumulativeCount() uint64 {
if m != nil && m.CumulativeCount != nil {
@@ -256,19 +424,41 @@ func (m *Bucket) GetUpperBound() float64 {
}
type Metric struct {
- Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
- Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
- Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
- Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
- Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
- Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
- TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+ Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
+ Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
+ Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
+ Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
+ Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
+ TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
func (m *Metric) Reset() { *m = Metric{} }
func (m *Metric) String() string { return proto.CompactTextString(m) }
func (*Metric) ProtoMessage() {}
+func (*Metric) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{8}
+}
+func (m *Metric) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Metric.Unmarshal(m, b)
+}
+func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
+}
+func (dst *Metric) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Metric.Merge(dst, src)
+}
+func (m *Metric) XXX_Size() int {
+ return xxx_messageInfo_Metric.Size(m)
+}
+func (m *Metric) XXX_DiscardUnknown() {
+ xxx_messageInfo_Metric.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Metric proto.InternalMessageInfo
func (m *Metric) GetLabel() []*LabelPair {
if m != nil {
@@ -320,16 +510,38 @@ func (m *Metric) GetTimestampMs() int64 {
}
type MetricFamily struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
- Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
- Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
+ Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
+ Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
func (m *MetricFamily) Reset() { *m = MetricFamily{} }
func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
func (*MetricFamily) ProtoMessage() {}
+func (*MetricFamily) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{9}
+}
+func (m *MetricFamily) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_MetricFamily.Unmarshal(m, b)
+}
+func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic)
+}
+func (dst *MetricFamily) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MetricFamily.Merge(dst, src)
+}
+func (m *MetricFamily) XXX_Size() int {
+ return xxx_messageInfo_MetricFamily.Size(m)
+}
+func (m *MetricFamily) XXX_DiscardUnknown() {
+ xxx_messageInfo_MetricFamily.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MetricFamily proto.InternalMessageInfo
func (m *MetricFamily) GetName() string {
if m != nil && m.Name != nil {
@@ -360,5 +572,58 @@ func (m *MetricFamily) GetMetric() []*Metric {
}
func init() {
+ proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair")
+ proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge")
+ proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter")
+ proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile")
+ proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary")
+ proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped")
+ proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram")
+ proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket")
+ proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric")
+ proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily")
proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
}
+
+func init() { proto.RegisterFile("metrics.proto", fileDescriptor_metrics_c97c9a2b9560cb8f) }
+
+var fileDescriptor_metrics_c97c9a2b9560cb8f = []byte{
+ // 591 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x4f, 0x4f, 0xdb, 0x4e,
+ 0x14, 0xfc, 0x99, 0xd8, 0x09, 0x7e, 0x86, 0x5f, 0xad, 0x15, 0x07, 0xab, 0x2d, 0x25, 0xcd, 0x89,
+ 0xf6, 0x10, 0x54, 0x04, 0xaa, 0x44, 0xdb, 0x03, 0x50, 0x1a, 0x2a, 0xd5, 0x40, 0x37, 0xc9, 0x81,
+ 0x5e, 0xac, 0x8d, 0x59, 0x25, 0x56, 0xbd, 0xb6, 0x6b, 0xef, 0x22, 0xe5, 0xdc, 0x43, 0xbf, 0x47,
+ 0xbf, 0x68, 0xab, 0xfd, 0xe3, 0x18, 0x24, 0xc3, 0xa9, 0xb7, 0xb7, 0xf3, 0x66, 0xde, 0x8e, 0x77,
+ 0xc7, 0x0b, 0x9b, 0x8c, 0xf2, 0x32, 0x89, 0xab, 0x61, 0x51, 0xe6, 0x3c, 0x47, 0x5b, 0x49, 0x2e,
+ 0x2b, 0x46, 0xf9, 0x82, 0x8a, 0x6a, 0x18, 0xa7, 0x09, 0xcd, 0xf8, 0xe0, 0x10, 0xdc, 0x2f, 0x64,
+ 0x46, 0xd3, 0x2b, 0x92, 0x94, 0x08, 0x81, 0x9d, 0x11, 0x46, 0x03, 0xab, 0x6f, 0xed, 0xba, 0x58,
+ 0xd5, 0x68, 0x0b, 0x9c, 0x5b, 0x92, 0x0a, 0x1a, 0xac, 0x29, 0x50, 0x2f, 0x06, 0xdb, 0xe0, 0x8c,
+ 0x88, 0x98, 0xdf, 0x69, 0x4b, 0x8d, 0x55, 0xb7, 0x77, 0xa0, 0x77, 0x9a, 0x8b, 0x8c, 0xd3, 0xf2,
+ 0x01, 0xc2, 0x7b, 0x58, 0xff, 0x2a, 0x48, 0xc6, 0x93, 0x94, 0xa2, 0xa7, 0xb0, 0xfe, 0xc3, 0xd4,
+ 0x86, 0xb4, 0x5a, 0xdf, 0xdf, 0x7d, 0xa5, 0xfe, 0x65, 0x41, 0x6f, 0x2c, 0x18, 0x23, 0xe5, 0x12,
+ 0xbd, 0x84, 0x8d, 0x8a, 0xb0, 0x22, 0xa5, 0x51, 0x2c, 0x77, 0x54, 0x13, 0x6c, 0xec, 0x69, 0x4c,
+ 0x99, 0x40, 0xdb, 0x00, 0x86, 0x52, 0x09, 0x66, 0x26, 0xb9, 0x1a, 0x19, 0x0b, 0x86, 0x8e, 0xee,
+ 0xec, 0xdf, 0xe9, 0x77, 0x76, 0xbd, 0xfd, 0x17, 0xc3, 0xb6, 0xb3, 0x1a, 0xd6, 0x8e, 0x1b, 0x7f,
+ 0xf2, 0x43, 0xa7, 0x19, 0x5f, 0x16, 0xf4, 0xe6, 0x81, 0x0f, 0xfd, 0x69, 0x81, 0x7b, 0x9e, 0x54,
+ 0x3c, 0x9f, 0x97, 0x84, 0xfd, 0x03, 0xb3, 0x07, 0xd0, 0x9d, 0x89, 0xf8, 0x3b, 0xe5, 0xc6, 0xea,
+ 0xf3, 0x76, 0xab, 0x27, 0x8a, 0x83, 0x0d, 0x77, 0x30, 0x81, 0xae, 0x46, 0xd0, 0x2b, 0xf0, 0x63,
+ 0xc1, 0x44, 0x4a, 0x78, 0x72, 0x7b, 0xdf, 0xc5, 0x93, 0x06, 0xd7, 0x4e, 0x76, 0xc0, 0x13, 0x45,
+ 0x41, 0xcb, 0x68, 0x96, 0x8b, 0xec, 0xc6, 0x58, 0x01, 0x05, 0x9d, 0x48, 0x64, 0xf0, 0x67, 0x0d,
+ 0xba, 0xa1, 0xca, 0x18, 0x3a, 0x04, 0x27, 0x95, 0x31, 0x0a, 0x2c, 0xe5, 0x6a, 0xa7, 0xdd, 0xd5,
+ 0x2a, 0x69, 0x58, 0xb3, 0xd1, 0x1b, 0x70, 0xe6, 0x32, 0x46, 0x6a, 0xb8, 0xb7, 0xff, 0xac, 0x5d,
+ 0xa6, 0x92, 0x86, 0x35, 0x13, 0xbd, 0x85, 0x5e, 0xac, 0xa3, 0x15, 0x74, 0x94, 0x68, 0xbb, 0x5d,
+ 0x64, 0xf2, 0x87, 0x6b, 0xb6, 0x14, 0x56, 0x3a, 0x33, 0x81, 0xfd, 0x98, 0xd0, 0x04, 0x0b, 0xd7,
+ 0x6c, 0x29, 0x14, 0xfa, 0x8e, 0x03, 0xe7, 0x31, 0xa1, 0x09, 0x02, 0xae, 0xd9, 0xe8, 0x03, 0xb8,
+ 0x8b, 0xfa, 0xea, 0x83, 0x9e, 0x92, 0x3e, 0x70, 0x30, 0xab, 0x84, 0xe0, 0x46, 0x21, 0xc3, 0xc2,
+ 0x13, 0x46, 0x2b, 0x4e, 0x58, 0x11, 0xb1, 0x2a, 0xe8, 0xf6, 0xad, 0xdd, 0x0e, 0xf6, 0x56, 0x58,
+ 0x58, 0x0d, 0x7e, 0x5b, 0xb0, 0xa1, 0x6f, 0xe0, 0x13, 0x61, 0x49, 0xba, 0x6c, 0xfd, 0x83, 0x11,
+ 0xd8, 0x0b, 0x9a, 0x16, 0xe6, 0x07, 0x56, 0x35, 0x3a, 0x00, 0x5b, 0x7a, 0x54, 0x47, 0xf8, 0xff,
+ 0x7e, 0xbf, 0xdd, 0x95, 0x9e, 0x3c, 0x59, 0x16, 0x14, 0x2b, 0xb6, 0x0c, 0x9f, 0x7e, 0x53, 0x02,
+ 0xfb, 0xb1, 0xf0, 0x69, 0x1d, 0x36, 0xdc, 0xd7, 0x21, 0x40, 0x33, 0x09, 0x79, 0xd0, 0x3b, 0xbd,
+ 0x9c, 0x5e, 0x4c, 0xce, 0xb0, 0xff, 0x1f, 0x72, 0xc1, 0x19, 0x1d, 0x4f, 0x47, 0x67, 0xbe, 0x25,
+ 0xf1, 0xf1, 0x34, 0x0c, 0x8f, 0xf1, 0xb5, 0xbf, 0x26, 0x17, 0xd3, 0x8b, 0xc9, 0xf5, 0xd5, 0xd9,
+ 0x47, 0xbf, 0x83, 0x36, 0xc1, 0x3d, 0xff, 0x3c, 0x9e, 0x5c, 0x8e, 0xf0, 0x71, 0xe8, 0xdb, 0x27,
+ 0x18, 0x5a, 0x5f, 0xb2, 0x6f, 0x47, 0xf3, 0x84, 0x2f, 0xc4, 0x6c, 0x18, 0xe7, 0x6c, 0xaf, 0xe9,
+ 0xee, 0xe9, 0x6e, 0xc4, 0xf2, 0x1b, 0x9a, 0xee, 0xcd, 0xf3, 0x77, 0x49, 0x1e, 0x35, 0xdd, 0x48,
+ 0x77, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x45, 0x21, 0x7f, 0x64, 0x2b, 0x05, 0x00, 0x00,
+}
diff --git a/vendor/github.com/prometheus/common/README.md b/vendor/github.com/prometheus/common/README.md
index 11a584945..4dd257bdd 100644
--- a/vendor/github.com/prometheus/common/README.md
+++ b/vendor/github.com/prometheus/common/README.md
@@ -6,7 +6,11 @@ components and libraries.
* **config**: Common configuration structures
* **expfmt**: Decoding and encoding for the exposition format
-* **log**: A logging wrapper around [logrus](https://github.com/sirupsen/logrus)
* **model**: Shared data structures
+* **promlog**: A logging wrapper around [go-kit/log](https://github.com/go-kit/kit/tree/master/log)
* **route**: A routing wrapper around [httprouter](https://github.com/julienschmidt/httprouter) using `context.Context`
-* **version**: Version informations and metric
+* **server**: Common servers
+* **version**: Version information and metrics
+
+## Deprecated
+* **log**: A logging wrapper around [logrus](https://github.com/sirupsen/logrus)
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
index a7a42d5ef..c092723e8 100644
--- a/vendor/github.com/prometheus/common/expfmt/decode.go
+++ b/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -164,9 +164,9 @@ func (sd *SampleDecoder) Decode(s *model.Vector) error {
}
// ExtractSamples builds a slice of samples from the provided metric
-// families. If an error occurs during sample extraction, it continues to
+// families. If an error occurs during sample extraction, it continues to
// extract from the remaining metric families. The returned error is the last
-// error that has occured.
+// error that has occurred.
func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) {
var (
all model.Vector
diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go
index 371ac7503..c71bcb981 100644
--- a/vendor/github.com/prometheus/common/expfmt/expfmt.go
+++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go
@@ -26,7 +26,7 @@ const (
// The Content-Type values for the different wire protocols.
FmtUnknown Format = ``
- FmtText Format = `text/plain; version=` + TextVersion
+ FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
FmtProtoText Format = ProtoFmt + ` encoding=text`
FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
index f11321cd0..0327865ee 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_create.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -14,13 +14,45 @@
package expfmt
import (
+ "bufio"
"fmt"
"io"
+ "io/ioutil"
"math"
+ "strconv"
"strings"
+ "sync"
- dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/model"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// enhancedWriter has all the enhanced write functions needed here. bufio.Writer
+// implements it.
+type enhancedWriter interface {
+ io.Writer
+ WriteRune(r rune) (n int, err error)
+ WriteString(s string) (n int, err error)
+ WriteByte(c byte) error
+}
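+
+// Both *bufio.Writer and *bytes.Buffer satisfy enhancedWriter, so callers
+// passing either type avoid the pooled bufio.Writer fallback below.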
+
+const (
+ initialNumBufSize = 24
+)
+
+var (
+ bufPool = sync.Pool{
+ New: func() interface{} {
+ return bufio.NewWriter(ioutil.Discard)
+ },
+ }
+ numBufPool = sync.Pool{
+ New: func() interface{} {
+ b := make([]byte, 0, initialNumBufSize)
+ return &b
+ },
+ }
)
// MetricFamilyToText converts a MetricFamily proto message into text format and
@@ -32,37 +64,90 @@ import (
// will result in invalid text format output.
//
// This method fulfills the type 'prometheus.encoder'.
-func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
- var written int
-
+func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) {
// Fail-fast checks.
if len(in.Metric) == 0 {
- return written, fmt.Errorf("MetricFamily has no metrics: %s", in)
+ return 0, fmt.Errorf("MetricFamily has no metrics: %s", in)
}
name := in.GetName()
if name == "" {
- return written, fmt.Errorf("MetricFamily has no name: %s", in)
+ return 0, fmt.Errorf("MetricFamily has no name: %s", in)
+ }
+
+ // Try the interface upgrade. If it doesn't work, we'll use a
+ // bufio.Writer from the sync.Pool.
+ w, ok := out.(enhancedWriter)
+ if !ok {
+ b := bufPool.Get().(*bufio.Writer)
+ b.Reset(out)
+ w = b
+ defer func() {
+ bErr := b.Flush()
+ if err == nil {
+ err = bErr
+ }
+ bufPool.Put(b)
+ }()
}
+ var n int
+
// Comments, first HELP, then TYPE.
if in.Help != nil {
- n, err := fmt.Fprintf(
- out, "# HELP %s %s\n",
- name, escapeString(*in.Help, false),
- )
+ n, err = w.WriteString("# HELP ")
written += n
if err != nil {
- return written, err
+ return
+ }
+ n, err = w.WriteString(name)
+ written += n
+ if err != nil {
+ return
+ }
+ err = w.WriteByte(' ')
+ written++
+ if err != nil {
+ return
}
+ n, err = writeEscapedString(w, *in.Help, false)
+ written += n
+ if err != nil {
+ return
+ }
+ err = w.WriteByte('\n')
+ written++
+ if err != nil {
+ return
+ }
+ }
+ n, err = w.WriteString("# TYPE ")
+ written += n
+ if err != nil {
+ return
+ }
+ n, err = w.WriteString(name)
+ written += n
+ if err != nil {
+ return
}
metricType := in.GetType()
- n, err := fmt.Fprintf(
- out, "# TYPE %s %s\n",
- name, strings.ToLower(metricType.String()),
- )
+ switch metricType {
+ case dto.MetricType_COUNTER:
+ n, err = w.WriteString(" counter\n")
+ case dto.MetricType_GAUGE:
+ n, err = w.WriteString(" gauge\n")
+ case dto.MetricType_SUMMARY:
+ n, err = w.WriteString(" summary\n")
+ case dto.MetricType_UNTYPED:
+ n, err = w.WriteString(" untyped\n")
+ case dto.MetricType_HISTOGRAM:
+ n, err = w.WriteString(" histogram\n")
+ default:
+ return written, fmt.Errorf("unknown metric type %s", metricType.String())
+ }
written += n
if err != nil {
- return written, err
+ return
}
// Finally the samples, one line for each.
@@ -75,9 +160,8 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
)
}
n, err = writeSample(
- name, metric, "", "",
+ w, name, "", metric, "", 0,
metric.Counter.GetValue(),
- out,
)
case dto.MetricType_GAUGE:
if metric.Gauge == nil {
@@ -86,9 +170,8 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
)
}
n, err = writeSample(
- name, metric, "", "",
+ w, name, "", metric, "", 0,
metric.Gauge.GetValue(),
- out,
)
case dto.MetricType_UNTYPED:
if metric.Untyped == nil {
@@ -97,9 +180,8 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
)
}
n, err = writeSample(
- name, metric, "", "",
+ w, name, "", metric, "", 0,
metric.Untyped.GetValue(),
- out,
)
case dto.MetricType_SUMMARY:
if metric.Summary == nil {
@@ -109,29 +191,26 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
}
for _, q := range metric.Summary.Quantile {
n, err = writeSample(
- name, metric,
- model.QuantileLabel, fmt.Sprint(q.GetQuantile()),
+ w, name, "", metric,
+ model.QuantileLabel, q.GetQuantile(),
q.GetValue(),
- out,
)
written += n
if err != nil {
- return written, err
+ return
}
}
n, err = writeSample(
- name+"_sum", metric, "", "",
+ w, name, "_sum", metric, "", 0,
metric.Summary.GetSampleSum(),
- out,
)
+ written += n
if err != nil {
- return written, err
+ return
}
- written += n
n, err = writeSample(
- name+"_count", metric, "", "",
+ w, name, "_count", metric, "", 0,
float64(metric.Summary.GetSampleCount()),
- out,
)
case dto.MetricType_HISTOGRAM:
if metric.Histogram == nil {
@@ -140,46 +219,42 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
)
}
infSeen := false
- for _, q := range metric.Histogram.Bucket {
+ for _, b := range metric.Histogram.Bucket {
n, err = writeSample(
- name+"_bucket", metric,
- model.BucketLabel, fmt.Sprint(q.GetUpperBound()),
- float64(q.GetCumulativeCount()),
- out,
+ w, name, "_bucket", metric,
+ model.BucketLabel, b.GetUpperBound(),
+ float64(b.GetCumulativeCount()),
)
written += n
if err != nil {
- return written, err
+ return
}
- if math.IsInf(q.GetUpperBound(), +1) {
+ if math.IsInf(b.GetUpperBound(), +1) {
infSeen = true
}
}
if !infSeen {
n, err = writeSample(
- name+"_bucket", metric,
- model.BucketLabel, "+Inf",
+ w, name, "_bucket", metric,
+ model.BucketLabel, math.Inf(+1),
float64(metric.Histogram.GetSampleCount()),
- out,
)
+ written += n
if err != nil {
- return written, err
+ return
}
- written += n
}
n, err = writeSample(
- name+"_sum", metric, "", "",
+ w, name, "_sum", metric, "", 0,
metric.Histogram.GetSampleSum(),
- out,
)
+ written += n
if err != nil {
- return written, err
+ return
}
- written += n
n, err = writeSample(
- name+"_count", metric, "", "",
+ w, name, "_count", metric, "", 0,
float64(metric.Histogram.GetSampleCount()),
- out,
)
default:
return written, fmt.Errorf(
@@ -188,116 +263,204 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
}
written += n
if err != nil {
- return written, err
+ return
}
}
- return written, nil
+ return
}
-// writeSample writes a single sample in text format to out, given the metric
+// writeSample writes a single sample in text format to w, given the metric
// name, the metric proto message itself, optionally an additional label name
-// and value (use empty strings if not required), and the value. The function
-// returns the number of bytes written and any error encountered.
+// with a float64 value (use empty string as label name if not required), and
+// the value. The function returns the number of bytes written and any error
+// encountered.
func writeSample(
- name string,
+ w enhancedWriter,
+ name, suffix string,
metric *dto.Metric,
- additionalLabelName, additionalLabelValue string,
+ additionalLabelName string, additionalLabelValue float64,
value float64,
- out io.Writer,
) (int, error) {
var written int
- n, err := fmt.Fprint(out, name)
+ n, err := w.WriteString(name)
written += n
if err != nil {
return written, err
}
- n, err = labelPairsToText(
- metric.Label,
- additionalLabelName, additionalLabelValue,
- out,
+ if suffix != "" {
+ n, err = w.WriteString(suffix)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err = writeLabelPairs(
+ w, metric.Label, additionalLabelName, additionalLabelValue,
)
written += n
if err != nil {
return written, err
}
- n, err = fmt.Fprintf(out, " %v", value)
+ err = w.WriteByte(' ')
+ written++
+ if err != nil {
+ return written, err
+ }
+ n, err = writeFloat(w, value)
written += n
if err != nil {
return written, err
}
if metric.TimestampMs != nil {
- n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs)
+ err = w.WriteByte(' ')
+ written++
+ if err != nil {
+ return written, err
+ }
+ n, err = writeInt(w, *metric.TimestampMs)
written += n
if err != nil {
return written, err
}
}
- n, err = out.Write([]byte{'\n'})
- written += n
+ err = w.WriteByte('\n')
+ written++
if err != nil {
return written, err
}
return written, nil
}
-// labelPairsToText converts a slice of LabelPair proto messages plus the
+// writeLabelPairs converts a slice of LabelPair proto messages plus the
// explicitly given additional label pair into text formatted as required by the
-// text format and writes it to 'out'. An empty slice in combination with an
-// empty string 'additionalLabelName' results in nothing being
-// written. Otherwise, the label pairs are written, escaped as required by the
-// text format, and enclosed in '{...}'. The function returns the number of
-// bytes written and any error encountered.
-func labelPairsToText(
+// text format and writes it to 'w'. An empty slice in combination with an empty
+// string 'additionalLabelName' results in nothing being written. Otherwise, the
+// label pairs are written, escaped as required by the text format, and enclosed
+// in '{...}'. The function returns the number of bytes written and any error
+// encountered.
+func writeLabelPairs(
+ w enhancedWriter,
in []*dto.LabelPair,
- additionalLabelName, additionalLabelValue string,
- out io.Writer,
+ additionalLabelName string, additionalLabelValue float64,
) (int, error) {
if len(in) == 0 && additionalLabelName == "" {
return 0, nil
}
- var written int
- separator := '{'
+ var (
+ written int
+ separator byte = '{'
+ )
for _, lp := range in {
- n, err := fmt.Fprintf(
- out, `%c%s="%s"`,
- separator, lp.GetName(), escapeString(lp.GetValue(), true),
- )
+ err := w.WriteByte(separator)
+ written++
+ if err != nil {
+ return written, err
+ }
+ n, err := w.WriteString(lp.GetName())
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = w.WriteString(`="`)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = writeEscapedString(w, lp.GetValue(), true)
written += n
if err != nil {
return written, err
}
+ err = w.WriteByte('"')
+ written++
+ if err != nil {
+ return written, err
+ }
separator = ','
}
if additionalLabelName != "" {
- n, err := fmt.Fprintf(
- out, `%c%s="%s"`,
- separator, additionalLabelName,
- escapeString(additionalLabelValue, true),
- )
+ err := w.WriteByte(separator)
+ written++
+ if err != nil {
+ return written, err
+ }
+ n, err := w.WriteString(additionalLabelName)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = w.WriteString(`="`)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = writeFloat(w, additionalLabelValue)
written += n
if err != nil {
return written, err
}
+ err = w.WriteByte('"')
+ written++
+ if err != nil {
+ return written, err
+ }
}
- n, err := out.Write([]byte{'}'})
- written += n
+ err := w.WriteByte('}')
+ written++
if err != nil {
return written, err
}
return written, nil
}
+// writeEscapedString replaces '\' by '\\', new line character by '\n', and - if
+// includeDoubleQuote is true - '"' by '\"'.
var (
- escape = strings.NewReplacer("\\", `\\`, "\n", `\n`)
- escapeWithDoubleQuote = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
+ escaper = strings.NewReplacer("\\", `\\`, "\n", `\n`)
+ quotedEscaper = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
)
-// escapeString replaces '\' by '\\', new line character by '\n', and - if
-// includeDoubleQuote is true - '"' by '\"'.
-func escapeString(v string, includeDoubleQuote bool) string {
+func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) {
if includeDoubleQuote {
- return escapeWithDoubleQuote.Replace(v)
+ return quotedEscaper.WriteString(w, v)
+ } else {
+ return escaper.WriteString(w, v)
+ }
+}
+
+// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes
+// a few common cases for increased efficiency. For non-hardcoded cases, it uses
+// strconv.AppendFloat to avoid allocations, similar to writeInt.
+func writeFloat(w enhancedWriter, f float64) (int, error) {
+ switch {
+ case f == 1:
+ return 1, w.WriteByte('1')
+ case f == 0:
+ return 1, w.WriteByte('0')
+ case f == -1:
+ return w.WriteString("-1")
+ case math.IsNaN(f):
+ return w.WriteString("NaN")
+ case math.IsInf(f, +1):
+ return w.WriteString("+Inf")
+ case math.IsInf(f, -1):
+ return w.WriteString("-Inf")
+ default:
+ bp := numBufPool.Get().(*[]byte)
+ *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
+ written, err := w.Write(*bp)
+ numBufPool.Put(bp)
+ return written, err
}
+}
- return escape.Replace(v)
+// writeInt is equivalent to fmt.Fprint with an int64 argument but uses
+// strconv.AppendInt with a byte slice taken from a sync.Pool to avoid
+// allocations.
+func writeInt(w enhancedWriter, i int64) (int, error) {
+ bp := numBufPool.Get().(*[]byte)
+ *bp = strconv.AppendInt((*bp)[:0], i, 10)
+ written, err := w.Write(*bp)
+ numBufPool.Put(bp)
+ return written, err
}
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
index 54bcfde29..342e5940d 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_parse.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -325,7 +325,7 @@ func (p *TextParser) startLabelValue() stateFn {
// - Other labels have to be added to currentLabels for signature calculation.
if p.currentMF.GetType() == dto.MetricType_SUMMARY {
if p.currentLabelPair.GetName() == model.QuantileLabel {
- if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+ if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
// Create a more helpful error message.
p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
return nil
@@ -337,7 +337,7 @@ func (p *TextParser) startLabelValue() stateFn {
// Similar special treatment of histograms.
if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
if p.currentLabelPair.GetName() == model.BucketLabel {
- if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+ if p.currentBucket, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
// Create a more helpful error message.
p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue()))
return nil
@@ -359,7 +359,7 @@ func (p *TextParser) startLabelValue() stateFn {
}
return p.readingValue
default:
- p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value))
+ p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue()))
return nil
}
}
@@ -392,7 +392,7 @@ func (p *TextParser) readingValue() stateFn {
if p.readTokenUntilWhitespace(); p.err != nil {
return nil // Unexpected end of input.
}
- value, err := strconv.ParseFloat(p.currentToken.String(), 64)
+ value, err := parseFloat(p.currentToken.String())
if err != nil {
// Create a more helpful error message.
p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
@@ -556,8 +556,8 @@ func (p *TextParser) readTokenUntilWhitespace() {
// byte considered is the byte already read (now in p.currentByte). The first
// newline byte encountered is still copied into p.currentByte, but not into
// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
-// recognized: '\\' tranlates into '\', and '\n' into a line-feed character. All
-// other escape sequences are invalid and cause an error.
+// recognized: '\\' translates into '\', and '\n' into a line-feed character.
+// All other escape sequences are invalid and cause an error.
func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
p.currentToken.Reset()
escaped := false
@@ -755,3 +755,10 @@ func histogramMetricName(name string) string {
return name
}
}
+
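+// parseFloat is a strconv.ParseFloat wrapper that rejects 'p', 'P', and '_'
+// up front, so that hexadecimal floats and digit separators (accepted by
+// ParseFloat in newer Go releases) are not treated as valid text-format values.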
+func parseFloat(s string) (float64, error) {
+ if strings.ContainsAny(s, "pP_") {
+ return 0, fmt.Errorf("unsupported character in float")
+ }
+ return strconv.ParseFloat(s, 64)
+}
diff --git a/vendor/github.com/prometheus/common/go.mod b/vendor/github.com/prometheus/common/go.mod
new file mode 100644
index 000000000..ccdb3ca54
--- /dev/null
+++ b/vendor/github.com/prometheus/common/go.mod
@@ -0,0 +1,22 @@
+module github.com/prometheus/common
+
+require (
+ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect
+ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 // indirect
+ github.com/go-kit/kit v0.9.0
+ github.com/go-logfmt/logfmt v0.4.0 // indirect
+ github.com/golang/protobuf v1.3.2
+ github.com/julienschmidt/httprouter v1.2.0
+ github.com/matttproud/golang_protobuf_extensions v1.0.1
+ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223
+ github.com/pkg/errors v0.8.1
+ github.com/prometheus/client_golang v1.0.0
+ github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90
+ github.com/sirupsen/logrus v1.4.2
+ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 // indirect
+ golang.org/x/sys v0.0.0-20190422165155-953cdadca894
+ gopkg.in/alecthomas/kingpin.v2 v2.2.6
+ gopkg.in/yaml.v2 v2.2.2
+)
+
+go 1.11
diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
index 648b38cb6..26e92288c 100644
--- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
+++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
@@ -1,12 +1,12 @@
/*
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
HTTP Content-Type Autonegotiation.
The functions in this package implement the behaviour specified in
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
-Copyright (c) 2011, Open Knowledge Foundation Ltd.
-All rights reserved.
-
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
index f7250909b..00804b7fe 100644
--- a/vendor/github.com/prometheus/common/model/metric.go
+++ b/vendor/github.com/prometheus/common/model/metric.go
@@ -21,7 +21,6 @@ import (
)
var (
- separator = []byte{0}
// MetricNameRE is a regular expression matching valid metric
// names. Note that the IsValidMetricName function performs the same
// check but faster than a match with this regular expression.
diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go
index 7538e2997..bb99889d2 100644
--- a/vendor/github.com/prometheus/common/model/silence.go
+++ b/vendor/github.com/prometheus/common/model/silence.go
@@ -59,8 +59,8 @@ func (m *Matcher) Validate() error {
return nil
}
-// Silence defines the representation of a silence definiton
-// in the Prometheus eco-system.
+// Silence defines the representation of a silence definition in the Prometheus
+// eco-system.
type Silence struct {
ID uint64 `json:"id,omitempty"`
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
index 74ed5a9f7..7b0064fdb 100644
--- a/vendor/github.com/prometheus/common/model/time.go
+++ b/vendor/github.com/prometheus/common/model/time.go
@@ -43,7 +43,7 @@ const (
// (1970-01-01 00:00 UTC) excluding leap seconds.
type Time int64
-// Interval describes and interval between two timestamps.
+// Interval describes an interval between two timestamps.
type Interval struct {
Start, End Time
}
@@ -150,7 +150,13 @@ func (t *Time) UnmarshalJSON(b []byte) error {
return err
}
- *t = Time(v + va)
+ // If the value was something like -0.1, the negative sign is lost in the
+ // parsing because of the leading zero; this ensures that we capture it.
+ if len(p[0]) > 0 && p[0][0] == '-' && v+va > 0 {
+ *t = Time(v+va) * -1
+ } else {
+ *t = Time(v + va)
+ }
default:
return fmt.Errorf("invalid time %q", string(b))
diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go
index c9ed3ffd8..c9d8fb1a2 100644
--- a/vendor/github.com/prometheus/common/model/value.go
+++ b/vendor/github.com/prometheus/common/model/value.go
@@ -100,7 +100,7 @@ func (s *SamplePair) UnmarshalJSON(b []byte) error {
}
// Equal returns true if this SamplePair and o have equal Values and equal
-// Timestamps. The sematics of Value equality is defined by SampleValue.Equal.
+// Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
func (s *SamplePair) Equal(o *SamplePair) bool {
return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
}
@@ -117,7 +117,7 @@ type Sample struct {
}
// Equal compares first the metrics, then the timestamp, then the value. The
-// sematics of value equality is defined by SampleValue.Equal.
+// semantics of value equality is defined by SampleValue.Equal.
func (s *Sample) Equal(o *Sample) bool {
if s == o {
return true
diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md
index 209549471..55d1e3261 100644
--- a/vendor/github.com/prometheus/procfs/README.md
+++ b/vendor/github.com/prometheus/procfs/README.md
@@ -1,7 +1,7 @@
# procfs
-This procfs package provides functions to retrieve system, kernel and process
-metrics from the pseudo-filesystem proc.
+This package provides functions to retrieve system, kernel, and process
+metrics from the pseudo-filesystems /proc and /sys.
*WARNING*: This package is a work in progress. Its API may still break in
backwards-incompatible ways without warnings. Use it at your own risk.
@@ -9,3 +9,53 @@ backwards-incompatible ways without warnings. Use it at your own risk.
[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs)
[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs)
[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs)
+
+## Usage
+
+The procfs library is organized by packages based on whether the gathered data is coming from
+/proc, /sys, or both. Each package contains an `FS` type which represents the path to either /proc,
+/sys, or both. For example, cpu statistics are gathered from
+`/proc/stat` and are available via the root procfs package. First, the proc filesystem mount
+point is initialized, and then the stat information is read.
+
+```go
+fs, err := procfs.NewFS("/proc")
+stats, err := fs.Stat()
+```
+
+Some sub-packages, such as `blockdevice`, require access to both the proc and sys filesystems.
+
+```go
+fs, err := blockdevice.NewFS("/proc", "/sys")
+stats, err := fs.ProcDiskstats()
+```
+
+## Package Organization
+
+The packages in this project are organized according to (1) whether the data comes from the `/proc` or
+`/sys` filesystem and (2) the type of information being retrieved. For example, most process information
+can be gathered from the functions in the root `procfs` package. Information about block devices such as disk drives
+is available in the `blockdevice` sub-package.
+
+## Building and Testing
+
+The procfs library is intended to be built as part of another application, so there are no distributable binaries.
+However, most of the API includes unit tests which can be run with `make test`.
+
+### Updating Test Fixtures
+
+The procfs library includes a set of test fixtures which include many example files from
+the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file
+which is extracted automatically during testing. To add/update the test fixtures, first
+ensure the `fixtures` directory is up to date by removing the existing directory and then
+extracting the ttar file using `make fixtures/.unpacked` or just `make test`.
+
+```bash
+rm -rf fixtures
+make test
+```
+
+Next, make the required changes to the extracted files in the `fixtures` directory. When
+the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file
+based on the updated `fixtures` directory. And finally, verify the changes using
+`git diff fixtures.ttar`.
diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go
new file mode 100644
index 000000000..916c9182a
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/arp.go
@@ -0,0 +1,85 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net"
+ "strings"
+)
+
+// ARPEntry contains a single row of the columnar data represented in
+// /proc/net/arp.
+type ARPEntry struct {
+ // IP address
+ IPAddr net.IP
+ // MAC address
+ HWAddr net.HardwareAddr
+ // Name of the device
+ Device string
+}
+
+// GatherARPEntries retrieves all the ARP entries, parses the relevant columns,
+// and then returns a slice of ARPEntries.
+func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
+ data, err := ioutil.ReadFile(fs.proc.Path("net/arp"))
+ if err != nil {
+ return nil, fmt.Errorf("error reading arp %s: %s", fs.proc.Path("net/arp"), err)
+ }
+
+ return parseARPEntries(data)
+}
+
+func parseARPEntries(data []byte) ([]ARPEntry, error) {
+ lines := strings.Split(string(data), "\n")
+ entries := make([]ARPEntry, 0)
+ var err error
+ const (
+ expectedDataWidth = 6
+ expectedHeaderWidth = 9
+ )
+ for _, line := range lines {
+ columns := strings.Fields(line)
+ width := len(columns)
+
+ if width == expectedHeaderWidth || width == 0 {
+ continue
+ } else if width == expectedDataWidth {
+ entry, err := parseARPEntry(columns)
+ if err != nil {
+ return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %s", err)
+ }
+ entries = append(entries, entry)
+ } else {
+ return []ARPEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedDataWidth)
+ }
+
+ }
+
+ return entries, err
+}
+
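+// parseARPEntry maps one /proc/net/arp data row (IP address, HW type, Flags,
+// HW address, Mask, Device) onto an ARPEntry, keeping the IP address
+// (column 0), the hardware address field (column 3), and the device (column 5).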
+func parseARPEntry(columns []string) (ARPEntry, error) {
+ ip := net.ParseIP(columns[0])
+ mac := net.HardwareAddr(columns[3])
+
+ entry := ARPEntry{
+ IPAddr: ip,
+ HWAddr: mac,
+ Device: columns[5],
+ }
+
+ return entry, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go
index d3a826807..10bd067a0 100644
--- a/vendor/github.com/prometheus/procfs/buddyinfo.go
+++ b/vendor/github.com/prometheus/procfs/buddyinfo.go
@@ -31,19 +31,9 @@ type BuddyInfo struct {
Sizes []float64
}
-// NewBuddyInfo reads the buddyinfo statistics.
-func NewBuddyInfo() ([]BuddyInfo, error) {
- fs, err := NewFS(DefaultMountPoint)
- if err != nil {
- return nil, err
- }
-
- return fs.NewBuddyInfo()
-}
-
-// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
-func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) {
- file, err := os.Open(fs.Path("buddyinfo"))
+// BuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
+func (fs FS) BuddyInfo() ([]BuddyInfo, error) {
+ file, err := os.Open(fs.proc.Path("buddyinfo"))
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go
new file mode 100644
index 000000000..2e0221552
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo.go
@@ -0,0 +1,167 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// CPUInfo contains general information about a system CPU found in /proc/cpuinfo
+type CPUInfo struct {
+ Processor uint
+ VendorID string
+ CPUFamily string
+ Model string
+ ModelName string
+ Stepping string
+ Microcode string
+ CPUMHz float64
+ CacheSize string
+ PhysicalID string
+ Siblings uint
+ CoreID string
+ CPUCores uint
+ APICID string
+ InitialAPICID string
+ FPU string
+ FPUException string
+ CPUIDLevel uint
+ WP string
+ Flags []string
+ Bugs []string
+ BogoMips float64
+ CLFlushSize uint
+ CacheAlignment uint
+ AddressSizes string
+ PowerManagement string
+}
+
+// CPUInfo returns information about current system CPUs.
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+func (fs FS) CPUInfo() ([]CPUInfo, error) {
+ data, err := util.ReadFileNoStat(fs.proc.Path("cpuinfo"))
+ if err != nil {
+ return nil, err
+ }
+ return parseCPUInfo(data)
+}
+
+// parseCPUInfo parses data from /proc/cpuinfo
+func parseCPUInfo(info []byte) ([]CPUInfo, error) {
+ cpuinfo := []CPUInfo{}
+ i := -1
+ scanner := bufio.NewScanner(bytes.NewReader(info))
+ for scanner.Scan() {
+ line := scanner.Text()
+ if strings.TrimSpace(line) == "" {
+ continue
+ }
+ field := strings.SplitN(line, ": ", 2)
+ switch strings.TrimSpace(field[0]) {
+ case "processor":
+ cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
+ i++
+ v, err := strconv.ParseUint(field[1], 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ cpuinfo[i].Processor = uint(v)
+ case "vendor_id":
+ cpuinfo[i].VendorID = field[1]
+ case "cpu family":
+ cpuinfo[i].CPUFamily = field[1]
+ case "model":
+ cpuinfo[i].Model = field[1]
+ case "model name":
+ cpuinfo[i].ModelName = field[1]
+ case "stepping":
+ cpuinfo[i].Stepping = field[1]
+ case "microcode":
+ cpuinfo[i].Microcode = field[1]
+ case "cpu MHz":
+ v, err := strconv.ParseFloat(field[1], 64)
+ if err != nil {
+ return nil, err
+ }
+ cpuinfo[i].CPUMHz = v
+ case "cache size":
+ cpuinfo[i].CacheSize = field[1]
+ case "physical id":
+ cpuinfo[i].PhysicalID = field[1]
+ case "siblings":
+ v, err := strconv.ParseUint(field[1], 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ cpuinfo[i].Siblings = uint(v)
+ case "core id":
+ cpuinfo[i].CoreID = field[1]
+ case "cpu cores":
+ v, err := strconv.ParseUint(field[1], 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ cpuinfo[i].CPUCores = uint(v)
+ case "apicid":
+ cpuinfo[i].APICID = field[1]
+ case "initial apicid":
+ cpuinfo[i].InitialAPICID = field[1]
+ case "fpu":
+ cpuinfo[i].FPU = field[1]
+ case "fpu_exception":
+ cpuinfo[i].FPUException = field[1]
+ case "cpuid level":
+ v, err := strconv.ParseUint(field[1], 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ cpuinfo[i].CPUIDLevel = uint(v)
+ case "wp":
+ cpuinfo[i].WP = field[1]
+ case "flags":
+ cpuinfo[i].Flags = strings.Fields(field[1])
+ case "bugs":
+ cpuinfo[i].Bugs = strings.Fields(field[1])
+ case "bogomips":
+ v, err := strconv.ParseFloat(field[1], 64)
+ if err != nil {
+ return nil, err
+ }
+ cpuinfo[i].BogoMips = v
+ case "clflush size":
+ v, err := strconv.ParseUint(field[1], 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ cpuinfo[i].CLFlushSize = uint(v)
+ case "cache_alignment":
+ v, err := strconv.ParseUint(field[1], 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ cpuinfo[i].CacheAlignment = uint(v)
+ case "address sizes":
+ cpuinfo[i].AddressSizes = field[1]
+ case "power management":
+ cpuinfo[i].PowerManagement = field[1]
+ }
+ }
+ return cpuinfo, nil
+
+}
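A short, hedged sketch of the new CPUInfo accessor; the summary format and field choice are illustrative assumptions:

    package example

    import (
        "fmt"

        "github.com/prometheus/procfs"
    )

    // cpuSummary reports the number of logical CPUs and the first model name.
    func cpuSummary(fs procfs.FS) (string, error) {
        cpus, err := fs.CPUInfo()
        if err != nil {
            return "", err
        }
        if len(cpus) == 0 {
            return "", fmt.Errorf("no CPUs found in /proc/cpuinfo")
        }
        return fmt.Sprintf("%d logical CPUs, model %q", len(cpus), cpus[0].ModelName), nil
    }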
diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go
new file mode 100644
index 000000000..19d4041b2
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/crypto.go
@@ -0,0 +1,131 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// Crypto holds info parsed from /proc/crypto.
+type Crypto struct {
+ Alignmask *uint64
+ Async bool
+ Blocksize *uint64
+ Chunksize *uint64
+ Ctxsize *uint64
+ Digestsize *uint64
+ Driver string
+ Geniv string
+ Internal string
+ Ivsize *uint64
+ Maxauthsize *uint64
+ MaxKeysize *uint64
+ MinKeysize *uint64
+ Module string
+ Name string
+ Priority *int64
+ Refcnt *int64
+ Seedsize *uint64
+ Selftest string
+ Type string
+ Walksize *uint64
+}
+
+// Crypto parses a crypto file (/proc/crypto) and returns a slice of
+// structs containing the relevant info. More information available here:
+// https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html
+func (fs FS) Crypto() ([]Crypto, error) {
+ data, err := ioutil.ReadFile(fs.proc.Path("crypto"))
+ if err != nil {
+ return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err)
+ }
+ crypto, err := parseCrypto(data)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err)
+ }
+ return crypto, nil
+}
+
+func parseCrypto(cryptoData []byte) ([]Crypto, error) {
+ crypto := []Crypto{}
+
+ cryptoBlocks := bytes.Split(cryptoData, []byte("\n\n"))
+
+ for _, block := range cryptoBlocks {
+ var newCryptoElem Crypto
+
+ lines := strings.Split(string(block), "\n")
+ for _, line := range lines {
+ if strings.TrimSpace(line) == "" || line[0] == ' ' {
+ continue
+ }
+ fields := strings.Split(line, ":")
+ key := strings.TrimSpace(fields[0])
+ value := strings.TrimSpace(fields[1])
+ vp := util.NewValueParser(value)
+
+ switch strings.TrimSpace(key) {
+ case "async":
+ b, err := strconv.ParseBool(value)
+ if err == nil {
+ newCryptoElem.Async = b
+ }
+ case "blocksize":
+ newCryptoElem.Blocksize = vp.PUInt64()
+ case "chunksize":
+ newCryptoElem.Chunksize = vp.PUInt64()
+ case "digestsize":
+ newCryptoElem.Digestsize = vp.PUInt64()
+ case "driver":
+ newCryptoElem.Driver = value
+ case "geniv":
+ newCryptoElem.Geniv = value
+ case "internal":
+ newCryptoElem.Internal = value
+ case "ivsize":
+ newCryptoElem.Ivsize = vp.PUInt64()
+ case "maxauthsize":
+ newCryptoElem.Maxauthsize = vp.PUInt64()
+ case "max keysize":
+ newCryptoElem.MaxKeysize = vp.PUInt64()
+ case "min keysize":
+ newCryptoElem.MinKeysize = vp.PUInt64()
+ case "module":
+ newCryptoElem.Module = value
+ case "name":
+ newCryptoElem.Name = value
+ case "priority":
+ newCryptoElem.Priority = vp.PInt64()
+ case "refcnt":
+ newCryptoElem.Refcnt = vp.PInt64()
+ case "seedsize":
+ newCryptoElem.Seedsize = vp.PUInt64()
+ case "selftest":
+ newCryptoElem.Selftest = value
+ case "type":
+ newCryptoElem.Type = value
+ case "walksize":
+ newCryptoElem.Walksize = vp.PUInt64()
+ }
+ }
+ crypto = append(crypto, newCryptoElem)
+ }
+ return crypto, nil
+}
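Most numeric fields of Crypto are pointers and stay nil when the kernel omits them, so callers need nil checks. A hedged sketch (the helper name is an assumption):

    package example

    import (
        "fmt"

        "github.com/prometheus/procfs"
    )

    // listCiphers prints each /proc/crypto entry's name and, when reported, its block size.
    func listCiphers(fs procfs.FS) error {
        entries, err := fs.Crypto()
        if err != nil {
            return err
        }
        for _, c := range entries {
            if c.Blocksize != nil {
                fmt.Printf("%s: blocksize %d\n", c.Name, *c.Blocksize)
            } else {
                fmt.Printf("%s: blocksize unreported\n", c.Name)
            }
        }
        return nil
    }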
diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go
index 36c1586a1..0102ab0fd 100644
--- a/vendor/github.com/prometheus/procfs/fs.go
+++ b/vendor/github.com/prometheus/procfs/fs.go
@@ -1,69 +1,43 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package procfs
import (
- "fmt"
- "os"
- "path"
-
- "github.com/prometheus/procfs/nfs"
- "github.com/prometheus/procfs/xfs"
+ "github.com/prometheus/procfs/internal/fs"
)
-// FS represents the pseudo-filesystem proc, which provides an interface to
+// FS represents the pseudo-filesystem sys, which provides an interface to
// kernel data structures.
-type FS string
-
-// DefaultMountPoint is the common mount point of the proc filesystem.
-const DefaultMountPoint = "/proc"
-
-// NewFS returns a new FS mounted under the given mountPoint. It will error
-// if the mount point can't be read.
-func NewFS(mountPoint string) (FS, error) {
- info, err := os.Stat(mountPoint)
- if err != nil {
- return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
- }
- if !info.IsDir() {
- return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
- }
-
- return FS(mountPoint), nil
-}
-
-// Path returns the path of the given subsystem relative to the procfs root.
-func (fs FS) Path(p ...string) string {
- return path.Join(append([]string{string(fs)}, p...)...)
-}
-
-// XFSStats retrieves XFS filesystem runtime statistics.
-func (fs FS) XFSStats() (*xfs.Stats, error) {
- f, err := os.Open(fs.Path("fs/xfs/stat"))
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- return xfs.ParseStats(f)
+type FS struct {
+ proc fs.FS
}
-// NFSdClientRPCStats retrieves NFS daemon RPC statistics.
-func (fs FS) NFSdClientRPCStats() (*nfs.ClientRPCStats, error) {
- f, err := os.Open(fs.Path("net/rpc/nfs"))
- if err != nil {
- return nil, err
- }
- defer f.Close()
+// DefaultMountPoint is the common mount point of the proc filesystem.
+const DefaultMountPoint = fs.DefaultProcMountPoint
- return nfs.ParseClientRPCStats(f)
+// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint.
+// It will error if the mount point directory can't be read or is a file.
+func NewDefaultFS() (FS, error) {
+ return NewFS(DefaultMountPoint)
}
-// NFSdServerRPCStats retrieves NFS daemon RPC statistics.
-func (fs FS) NFSdServerRPCStats() (*nfs.ServerRPCStats, error) {
- f, err := os.Open(fs.Path("net/rpc/nfsd"))
+// NewFS returns a new proc FS mounted under the given proc mountPoint. It will error
+// if the mount point directory can't be read or is a file.
+func NewFS(mountPoint string) (FS, error) {
+ fs, err := fs.NewFS(mountPoint)
if err != nil {
- return nil, err
+ return FS{}, err
}
- defer f.Close()
-
- return nfs.ParseServerRPCStats(f)
+ return FS{fs}, nil
}
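FS is now a struct wrapping the shared internal fs.FS helper instead of a bare string, so callers construct it via NewDefaultFS or NewFS. A minimal sketch of both constructors (the alternative mount point is a hypothetical example):

    package main

    import (
        "log"

        "github.com/prometheus/procfs"
    )

    func main() {
        // Default /proc mount point.
        fs, err := procfs.NewDefaultFS()
        if err != nil {
            log.Fatal(err)
        }
        _ = fs

        // Or an alternative procfs mount, e.g. inside a container.
        altFS, err := procfs.NewFS("/host/proc")
        if err != nil {
            log.Fatal(err)
        }
        _ = altFS
    }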
diff --git a/vendor/github.com/prometheus/procfs/go.mod b/vendor/github.com/prometheus/procfs/go.mod
new file mode 100644
index 000000000..0e04e5d1f
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/go.mod
@@ -0,0 +1,8 @@
+module github.com/prometheus/procfs
+
+go 1.12
+
+require (
+ github.com/google/go-cmp v0.3.1
+ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
+)
diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go
new file mode 100644
index 000000000..565e89e42
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/internal/fs/fs.go
@@ -0,0 +1,55 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fs
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+)
+
+const (
+ // DefaultProcMountPoint is the common mount point of the proc filesystem.
+ DefaultProcMountPoint = "/proc"
+
+ // DefaultSysMountPoint is the common mount point of the sys filesystem.
+ DefaultSysMountPoint = "/sys"
+
+ // DefaultConfigfsMountPoint is the common mount point of the configfs
+ DefaultConfigfsMountPoint = "/sys/kernel/config"
+)
+
+// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an
+// interface to kernel data structures.
+type FS string
+
+// NewFS returns a new FS mounted under the given mountPoint. It will error
+// if the mount point can't be read.
+func NewFS(mountPoint string) (FS, error) {
+ info, err := os.Stat(mountPoint)
+ if err != nil {
+ return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
+ }
+ if !info.IsDir() {
+ return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
+ }
+
+ return FS(mountPoint), nil
+}
+
+// Path appends the given path elements to the filesystem path, adding separators
+// as necessary.
+func (fs FS) Path(p ...string) string {
+ return filepath.Join(append([]string{string(fs)}, p...)...)
+}
diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go
index 1ad21c91a..755591d9a 100644
--- a/vendor/github.com/prometheus/procfs/internal/util/parse.go
+++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go
@@ -13,7 +13,11 @@
package util
-import "strconv"
+import (
+ "io/ioutil"
+ "strconv"
+ "strings"
+)
// ParseUint32s parses a slice of strings into a slice of uint32s.
func ParseUint32s(ss []string) ([]uint32, error) {
@@ -44,3 +48,41 @@ func ParseUint64s(ss []string) ([]uint64, error) {
return us, nil
}
+
+// ParsePInt64s parses a slice of strings into a slice of int64 pointers.
+func ParsePInt64s(ss []string) ([]*int64, error) {
+ us := make([]*int64, 0, len(ss))
+ for _, s := range ss {
+ u, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ us = append(us, &u)
+ }
+
+ return us, nil
+}
+
+// ReadUintFromFile reads a file and attempts to parse a uint64 from it.
+func ReadUintFromFile(path string) (uint64, error) {
+ data, err := ioutil.ReadFile(path)
+ if err != nil {
+ return 0, err
+ }
+ return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
+}
+
+// ParseBool parses a string into a boolean pointer.
+func ParseBool(b string) *bool {
+ var truth bool
+ switch b {
+ case "enabled":
+ truth = true
+ case "disabled":
+ truth = false
+ default:
+ return nil
+ }
+ return &truth
+}
diff --git a/vendor/github.com/prometheus/procfs/internal/util/readfile.go b/vendor/github.com/prometheus/procfs/internal/util/readfile.go
new file mode 100644
index 000000000..8051161b2
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/internal/util/readfile.go
@@ -0,0 +1,38 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+ "io"
+ "io/ioutil"
+ "os"
+)
+
+// ReadFileNoStat uses ioutil.ReadAll to read contents of entire file.
+// This is similar to ioutil.ReadFile but without the call to os.Stat, because
+// many files in /proc and /sys report incorrect file sizes (either 0 or 4096).
+// Reads a max file size of 512kB. For files larger than this, a scanner
+// should be used.
+func ReadFileNoStat(filename string) ([]byte, error) {
+ const maxBufferSize = 1024 * 512
+
+ f, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ reader := io.LimitReader(f, maxBufferSize)
+ return ioutil.ReadAll(reader)
+}
diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
new file mode 100644
index 000000000..c07de0b6c
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
@@ -0,0 +1,48 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build linux,!appengine
+
+package util
+
+import (
+ "bytes"
+ "os"
+ "syscall"
+)
+
+// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly.
+// https://github.com/prometheus/node_exporter/pull/728/files
+//
+// Note that this function will not read files larger than 128 bytes.
+func SysReadFile(file string) (string, error) {
+ f, err := os.Open(file)
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+
+ // On some machines, hwmon drivers are broken and return EAGAIN. This causes
+ // Go's ioutil.ReadFile implementation to poll forever.
+ //
+ // Since we either want to read data or bail immediately, do the simplest
+ // possible read using syscall directly.
+ const sysFileBufferSize = 128
+ b := make([]byte, sysFileBufferSize)
+ n, err := syscall.Read(int(f.Fd()), b)
+ if err != nil {
+ return "", err
+ }
+
+ return string(bytes.TrimSpace(b[:n])), nil
+}
diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go
new file mode 100644
index 000000000..bd55b4537
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go
@@ -0,0 +1,26 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build linux,appengine !linux
+
+package util
+
+import (
+ "fmt"
+)
+
+// SysReadFile is here implemented as a noop for builds that do not support
+// the read syscall, for example Windows or Linux on Google App Engine.
+func SysReadFile(file string) (string, error) {
+ return "", fmt.Errorf("not supported on this platform")
+}
diff --git a/vendor/github.com/prometheus/procfs/internal/util/valueparser.go b/vendor/github.com/prometheus/procfs/internal/util/valueparser.go
new file mode 100644
index 000000000..fe2355d3c
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/internal/util/valueparser.go
@@ -0,0 +1,91 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+ "strconv"
+)
+
+// TODO(mdlayher): util packages are an anti-pattern and this should be moved
+// somewhere else that is more focused in the future.
+
+// A ValueParser enables parsing a single string into a variety of data types
+// in a concise and safe way. The Err method must be invoked after invoking
+// any other methods to ensure a value was successfully parsed.
+type ValueParser struct {
+ v string
+ err error
+}
+
+// NewValueParser creates a ValueParser using the input string.
+func NewValueParser(v string) *ValueParser {
+ return &ValueParser{v: v}
+}
+
+// Int interprets the underlying value as an int and returns that value.
+func (vp *ValueParser) Int() int { return int(vp.int64()) }
+
+// PInt64 interprets the underlying value as an int64 and returns a pointer to
+// that value.
+func (vp *ValueParser) PInt64() *int64 {
+ if vp.err != nil {
+ return nil
+ }
+
+ v := vp.int64()
+ return &v
+}
+
+// int64 interprets the underlying value as an int64 and returns that value.
+// TODO: export if/when necessary.
+func (vp *ValueParser) int64() int64 {
+ if vp.err != nil {
+ return 0
+ }
+
+ // A base value of zero makes ParseInt infer the correct base using the
+ // string's prefix, if any.
+ const base = 0
+ v, err := strconv.ParseInt(vp.v, base, 64)
+ if err != nil {
+ vp.err = err
+ return 0
+ }
+
+ return v
+}
+
+// PUInt64 interprets the underlying value as an uint64 and returns a pointer to
+// that value.
+func (vp *ValueParser) PUInt64() *uint64 {
+ if vp.err != nil {
+ return nil
+ }
+
+ // A base value of zero makes ParseInt infer the correct base using the
+ // string's prefix, if any.
+ const base = 0
+ v, err := strconv.ParseUint(vp.v, base, 64)
+ if err != nil {
+ vp.err = err
+ return nil
+ }
+
+ return &v
+}
+
+// Err returns the last error, if any, encountered by the ValueParser.
+func (vp *ValueParser) Err() error {
+ return vp.err
+}
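ValueParser sits in an internal package, so only code within this module can use it (crypto.go above is one caller). A sketch of the intended pattern, written as if inside package procfs: parse first, then check Err before trusting the result:

    package procfs

    import (
        "fmt"

        "github.com/prometheus/procfs/internal/util"
    )

    // parseSizeField illustrates the ValueParser pattern: parse, then check Err.
    func parseSizeField(raw string) (uint64, error) {
        vp := util.NewValueParser(raw)
        size := vp.PUInt64()
        if err := vp.Err(); err != nil {
            return 0, fmt.Errorf("bad size field %q: %v", raw, err)
        }
        return *size, nil
    }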
diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go
index 5761b4570..89e447746 100644
--- a/vendor/github.com/prometheus/procfs/ipvs.go
+++ b/vendor/github.com/prometheus/procfs/ipvs.go
@@ -1,7 +1,21 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package procfs
import (
"bufio"
+ "bytes"
"encoding/hex"
"errors"
"fmt"
@@ -11,6 +25,8 @@ import (
"os"
"strconv"
"strings"
+
+ "github.com/prometheus/procfs/internal/util"
)
// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`.
@@ -49,29 +65,18 @@ type IPVSBackendStatus struct {
Weight uint64
}
-// NewIPVSStats reads the IPVS statistics.
-func NewIPVSStats() (IPVSStats, error) {
- fs, err := NewFS(DefaultMountPoint)
- if err != nil {
- return IPVSStats{}, err
- }
-
- return fs.NewIPVSStats()
-}
-
-// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem.
-func (fs FS) NewIPVSStats() (IPVSStats, error) {
- file, err := os.Open(fs.Path("net/ip_vs_stats"))
+// IPVSStats reads the IPVS statistics from the specified `proc` filesystem.
+func (fs FS) IPVSStats() (IPVSStats, error) {
+ data, err := util.ReadFileNoStat(fs.proc.Path("net/ip_vs_stats"))
if err != nil {
return IPVSStats{}, err
}
- defer file.Close()
- return parseIPVSStats(file)
+ return parseIPVSStats(bytes.NewReader(data))
}
// parseIPVSStats performs the actual parsing of `ip_vs_stats`.
-func parseIPVSStats(file io.Reader) (IPVSStats, error) {
+func parseIPVSStats(r io.Reader) (IPVSStats, error) {
var (
statContent []byte
statLines []string
@@ -79,7 +84,7 @@ func parseIPVSStats(file io.Reader) (IPVSStats, error) {
stats IPVSStats
)
- statContent, err := ioutil.ReadAll(file)
+ statContent, err := ioutil.ReadAll(r)
if err != nil {
return IPVSStats{}, err
}
@@ -118,19 +123,9 @@ func parseIPVSStats(file io.Reader) (IPVSStats, error) {
return stats, nil
}
-// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs.
-func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
- fs, err := NewFS(DefaultMountPoint)
- if err != nil {
- return []IPVSBackendStatus{}, err
- }
-
- return fs.NewIPVSBackendStatus()
-}
-
-// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
-func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
- file, err := os.Open(fs.Path("net/ip_vs"))
+// IPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
+func (fs FS) IPVSBackendStatus() ([]IPVSBackendStatus, error) {
+ file, err := os.Open(fs.proc.Path("net/ip_vs"))
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go
index d7a248c0d..2af3ada18 100644
--- a/vendor/github.com/prometheus/procfs/mdstat.go
+++ b/vendor/github.com/prometheus/procfs/mdstat.go
@@ -1,3 +1,16 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package procfs
import (
@@ -9,8 +22,8 @@ import (
)
var (
- statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
- buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`)
+ statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
+ recoveryLineRE = regexp.MustCompile(`\((\d+)/\d+\)`)
)
// MDStat holds info parsed from /proc/mdstat.
@@ -21,117 +34,160 @@ type MDStat struct {
ActivityState string
// Number of active disks.
DisksActive int64
- // Total number of disks the device consists of.
+ // Total number of disks the device requires.
DisksTotal int64
+ // Number of failed disks.
+ DisksFailed int64
+ // Spare disks in the device.
+ DisksSpare int64
// Number of blocks the device holds.
BlocksTotal int64
// Number of blocks on the device that are in sync.
BlocksSynced int64
}
-// ParseMDStat parses an mdstat-file and returns a struct with the relevant infos.
-func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
- mdStatusFilePath := fs.Path("mdstat")
- content, err := ioutil.ReadFile(mdStatusFilePath)
+// MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of
+// structs containing the relevant info. More information available here:
+// https://raid.wiki.kernel.org/index.php/Mdstat
+func (fs FS) MDStat() ([]MDStat, error) {
+ data, err := ioutil.ReadFile(fs.proc.Path("mdstat"))
+ if err != nil {
+ return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err)
+ }
+ mdstat, err := parseMDStat(data)
if err != nil {
- return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+ return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err)
}
+ return mdstat, nil
+}
- mdStates := []MDStat{}
- lines := strings.Split(string(content), "\n")
- for i, l := range lines {
- if l == "" {
- continue
- }
- if l[0] == ' ' {
- continue
- }
- if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") {
+// parseMDStat parses data from mdstat file (/proc/mdstat) and returns a slice of
+// structs containing the relevant info.
+func parseMDStat(mdStatData []byte) ([]MDStat, error) {
+ mdStats := []MDStat{}
+ lines := strings.Split(string(mdStatData), "\n")
+
+ for i, line := range lines {
+ if strings.TrimSpace(line) == "" || line[0] == ' ' ||
+ strings.HasPrefix(line, "Personalities") ||
+ strings.HasPrefix(line, "unused") {
continue
}
- mainLine := strings.Split(l, " ")
- if len(mainLine) < 3 {
- return mdStates, fmt.Errorf("error parsing mdline: %s", l)
+ deviceFields := strings.Fields(line)
+ if len(deviceFields) < 3 {
+ return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", line)
}
- mdName := mainLine[0]
- activityState := mainLine[2]
+ mdName := deviceFields[0] // mdx
+ state := deviceFields[2] // active or inactive
if len(lines) <= i+3 {
- return mdStates, fmt.Errorf(
- "error parsing %s: too few lines for md device %s",
- mdStatusFilePath,
+ return nil, fmt.Errorf(
+ "error parsing %s: too few lines for md device",
mdName,
)
}
- active, total, size, err := evalStatusline(lines[i+1])
+ // Failed disks have the suffix (F) & Spare disks have the suffix (S).
+ fail := int64(strings.Count(line, "(F)"))
+ spare := int64(strings.Count(line, "(S)"))
+ active, total, size, err := evalStatusLine(lines[i], lines[i+1])
+
if err != nil {
- return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+ return nil, fmt.Errorf("error parsing md device lines: %s", err)
}
- // j is the line number of the syncing-line.
- j := i + 2
+ syncLineIdx := i + 2
if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line
- j = i + 3
+ syncLineIdx++
}
// If device is syncing at the moment, get the number of currently
// synced bytes, otherwise that number equals the size of the device.
syncedBlocks := size
- if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") {
- syncedBlocks, err = evalBuildline(lines[j])
- if err != nil {
- return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+ recovering := strings.Contains(lines[syncLineIdx], "recovery")
+ resyncing := strings.Contains(lines[syncLineIdx], "resync")
+
+ // Append recovery and resyncing state info.
+ if recovering || resyncing {
+ if recovering {
+ state = "recovering"
+ } else {
+ state = "resyncing"
+ }
+
+ // Handle case when resync=PENDING or resync=DELAYED.
+ if strings.Contains(lines[syncLineIdx], "PENDING") ||
+ strings.Contains(lines[syncLineIdx], "DELAYED") {
+ syncedBlocks = 0
+ } else {
+ syncedBlocks, err = evalRecoveryLine(lines[syncLineIdx])
+ if err != nil {
+ return nil, fmt.Errorf("error parsing sync line in md device %s: %s", mdName, err)
+ }
}
}
- mdStates = append(mdStates, MDStat{
+ mdStats = append(mdStats, MDStat{
Name: mdName,
- ActivityState: activityState,
+ ActivityState: state,
DisksActive: active,
+ DisksFailed: fail,
+ DisksSpare: spare,
DisksTotal: total,
BlocksTotal: size,
BlocksSynced: syncedBlocks,
})
}
- return mdStates, nil
+ return mdStats, nil
}
-func evalStatusline(statusline string) (active, total, size int64, err error) {
- matches := statuslineRE.FindStringSubmatch(statusline)
- if len(matches) != 4 {
- return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline)
- }
+func evalStatusLine(deviceLine, statusLine string) (active, total, size int64, err error) {
- size, err = strconv.ParseInt(matches[1], 10, 64)
+ sizeStr := strings.Fields(statusLine)[0]
+ size, err = strconv.ParseInt(sizeStr, 10, 64)
if err != nil {
- return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
+ return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err)
+ }
+
+ if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
+ // In the device deviceLine, only disks have a number associated with them in [].
+ total = int64(strings.Count(deviceLine, "["))
+ return total, total, size, nil
+ }
+
+ if strings.Contains(deviceLine, "inactive") {
+ return 0, 0, size, nil
+ }
+
+ matches := statusLineRE.FindStringSubmatch(statusLine)
+ if len(matches) != 4 {
+ return 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine)
}
total, err = strconv.ParseInt(matches[2], 10, 64)
if err != nil {
- return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
+ return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err)
}
active, err = strconv.ParseInt(matches[3], 10, 64)
if err != nil {
- return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
+ return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err)
}
return active, total, size, nil
}
-func evalBuildline(buildline string) (syncedBlocks int64, err error) {
- matches := buildlineRE.FindStringSubmatch(buildline)
+func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, err error) {
+ matches := recoveryLineRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
- return 0, fmt.Errorf("unexpected buildline: %s", buildline)
+ return 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine)
}
syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
if err != nil {
- return 0, fmt.Errorf("%s in buildline: %s", err, buildline)
+ return 0, fmt.Errorf("%s in recoveryLine: %s", err, recoveryLine)
}
return syncedBlocks, nil
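The reworked MDStat accessor also counts failed and spare disks and folds recovery/resync progress into ActivityState. An illustrative sketch of consuming it:

    package example

    import (
        "fmt"

        "github.com/prometheus/procfs"
    )

    // mdHealth prints a one-line health summary per md device.
    func mdHealth(fs procfs.FS) error {
        devices, err := fs.MDStat()
        if err != nil {
            return err
        }
        for _, md := range devices {
            fmt.Printf("%s: %s, %d/%d active, %d failed, %d spare, %d/%d blocks synced\n",
                md.Name, md.ActivityState,
                md.DisksActive, md.DisksTotal, md.DisksFailed, md.DisksSpare,
                md.BlocksSynced, md.BlocksTotal)
        }
        return nil
    }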
diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go
new file mode 100644
index 000000000..50dab4bcd
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/meminfo.go
@@ -0,0 +1,277 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// Meminfo represents memory statistics.
+type Meminfo struct {
+ // Total usable ram (i.e. physical ram minus a few reserved
+ // bits and the kernel binary code)
+ MemTotal uint64
+ // The sum of LowFree+HighFree
+ MemFree uint64
+ // An estimate of how much memory is available for starting
+ // new applications, without swapping. Calculated from
+ // MemFree, SReclaimable, the size of the file LRU lists, and
+ // the low watermarks in each zone. The estimate takes into
+ // account that the system needs some page cache to function
+ // well, and that not all reclaimable slab will be
+ // reclaimable, due to items being in use. The impact of those
+ // factors will vary from system to system.
+ MemAvailable uint64
+ // Relatively temporary storage for raw disk blocks shouldn't
+ // get tremendously large (20MB or so)
+ Buffers uint64
+ Cached uint64
+ // Memory that once was swapped out, is swapped back in but
+ // still also is in the swapfile (if memory is needed it
+ // doesn't need to be swapped out AGAIN because it is already
+ // in the swapfile. This saves I/O)
+ SwapCached uint64
+ // Memory that has been used more recently and usually not
+ // reclaimed unless absolutely necessary.
+ Active uint64
+ // Memory which has been less recently used. It is more
+ // eligible to be reclaimed for other purposes
+ Inactive uint64
+ ActiveAnon uint64
+ InactiveAnon uint64
+ ActiveFile uint64
+ InactiveFile uint64
+ Unevictable uint64
+ Mlocked uint64
+ // total amount of swap space available
+ SwapTotal uint64
+ // Memory which has been evicted from RAM, and is temporarily
+ // on the disk
+ SwapFree uint64
+ // Memory which is waiting to get written back to the disk
+ Dirty uint64
+ // Memory which is actively being written back to the disk
+ Writeback uint64
+ // Non-file backed pages mapped into userspace page tables
+ AnonPages uint64
+ // files which have been mapped, such as libraries
+ Mapped uint64
+ Shmem uint64
+ // in-kernel data structures cache
+ Slab uint64
+ // Part of Slab, that might be reclaimed, such as caches
+ SReclaimable uint64
+ // Part of Slab, that cannot be reclaimed on memory pressure
+ SUnreclaim uint64
+ KernelStack uint64
+ // amount of memory dedicated to the lowest level of page
+ // tables.
+ PageTables uint64
+ // NFS pages sent to the server, but not yet committed to
+ // stable storage
+ NFSUnstable uint64
+ // Memory used for block device "bounce buffers"
+ Bounce uint64
+ // Memory used by FUSE for temporary writeback buffers
+ WritebackTmp uint64
+ // Based on the overcommit ratio ('vm.overcommit_ratio'),
+ // this is the total amount of memory currently available to
+ // be allocated on the system. This limit is only adhered to
+ // if strict overcommit accounting is enabled (mode 2 in
+ // 'vm.overcommit_memory').
+ // The CommitLimit is calculated with the following formula:
+ // CommitLimit = ([total RAM pages] - [total huge TLB pages]) *
+ // overcommit_ratio / 100 + [total swap pages]
+ // For example, on a system with 1G of physical RAM and 7G
+ // of swap with a `vm.overcommit_ratio` of 30 it would
+ // yield a CommitLimit of 7.3G.
+ // For more details, see the memory overcommit documentation
+ // in vm/overcommit-accounting.
+ CommitLimit uint64
+ // The amount of memory presently allocated on the system.
+ // The committed memory is a sum of all of the memory which
+ // has been allocated by processes, even if it has not been
+ // "used" by them as of yet. A process which malloc()'s 1G
+ // of memory, but only touches 300M of it will show up as
+ // using 1G. This 1G is memory which has been "committed" to
+ // by the VM and can be used at any time by the allocating
+ // application. With strict overcommit enabled on the system
+ // (mode 2 in 'vm.overcommit_memory'),allocations which would
+ // exceed the CommitLimit (detailed above) will not be permitted.
+ // This is useful if one needs to guarantee that processes will
+ // not fail due to lack of memory once that memory has been
+ // successfully allocated.
+ CommittedAS uint64
+ // total size of vmalloc memory area
+ VmallocTotal uint64
+ // amount of vmalloc area which is used
+ VmallocUsed uint64
+ // largest contiguous block of vmalloc area which is free
+ VmallocChunk uint64
+ HardwareCorrupted uint64
+ AnonHugePages uint64
+ ShmemHugePages uint64
+ ShmemPmdMapped uint64
+ CmaTotal uint64
+ CmaFree uint64
+ HugePagesTotal uint64
+ HugePagesFree uint64
+ HugePagesRsvd uint64
+ HugePagesSurp uint64
+ Hugepagesize uint64
+ DirectMap4k uint64
+ DirectMap2M uint64
+ DirectMap1G uint64
+}
+
+// Meminfo returns information about current kernel/system memory statistics.
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+func (fs FS) Meminfo() (Meminfo, error) {
+ b, err := util.ReadFileNoStat(fs.proc.Path("meminfo"))
+ if err != nil {
+ return Meminfo{}, err
+ }
+
+ m, err := parseMemInfo(bytes.NewReader(b))
+ if err != nil {
+ return Meminfo{}, fmt.Errorf("failed to parse meminfo: %v", err)
+ }
+
+ return *m, nil
+}
+
+func parseMemInfo(r io.Reader) (*Meminfo, error) {
+ var m Meminfo
+ s := bufio.NewScanner(r)
+ for s.Scan() {
+ // Each line has at least a name and value; we ignore the unit.
+ fields := strings.Fields(s.Text())
+ if len(fields) < 2 {
+ return nil, fmt.Errorf("malformed meminfo line: %q", s.Text())
+ }
+
+ v, err := strconv.ParseUint(fields[1], 0, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ switch fields[0] {
+ case "MemTotal:":
+ m.MemTotal = v
+ case "MemFree:":
+ m.MemFree = v
+ case "MemAvailable:":
+ m.MemAvailable = v
+ case "Buffers:":
+ m.Buffers = v
+ case "Cached:":
+ m.Cached = v
+ case "SwapCached:":
+ m.SwapCached = v
+ case "Active:":
+ m.Active = v
+ case "Inactive:":
+ m.Inactive = v
+ case "Active(anon):":
+ m.ActiveAnon = v
+ case "Inactive(anon):":
+ m.InactiveAnon = v
+ case "Active(file):":
+ m.ActiveFile = v
+ case "Inactive(file):":
+ m.InactiveFile = v
+ case "Unevictable:":
+ m.Unevictable = v
+ case "Mlocked:":
+ m.Mlocked = v
+ case "SwapTotal:":
+ m.SwapTotal = v
+ case "SwapFree:":
+ m.SwapFree = v
+ case "Dirty:":
+ m.Dirty = v
+ case "Writeback:":
+ m.Writeback = v
+ case "AnonPages:":
+ m.AnonPages = v
+ case "Mapped:":
+ m.Mapped = v
+ case "Shmem:":
+ m.Shmem = v
+ case "Slab:":
+ m.Slab = v
+ case "SReclaimable:":
+ m.SReclaimable = v
+ case "SUnreclaim:":
+ m.SUnreclaim = v
+ case "KernelStack:":
+ m.KernelStack = v
+ case "PageTables:":
+ m.PageTables = v
+ case "NFS_Unstable:":
+ m.NFSUnstable = v
+ case "Bounce:":
+ m.Bounce = v
+ case "WritebackTmp:":
+ m.WritebackTmp = v
+ case "CommitLimit:":
+ m.CommitLimit = v
+ case "Committed_AS:":
+ m.CommittedAS = v
+ case "VmallocTotal:":
+ m.VmallocTotal = v
+ case "VmallocUsed:":
+ m.VmallocUsed = v
+ case "VmallocChunk:":
+ m.VmallocChunk = v
+ case "HardwareCorrupted:":
+ m.HardwareCorrupted = v
+ case "AnonHugePages:":
+ m.AnonHugePages = v
+ case "ShmemHugePages:":
+ m.ShmemHugePages = v
+ case "ShmemPmdMapped:":
+ m.ShmemPmdMapped = v
+ case "CmaTotal:":
+ m.CmaTotal = v
+ case "CmaFree:":
+ m.CmaFree = v
+ case "HugePages_Total:":
+ m.HugePagesTotal = v
+ case "HugePages_Free:":
+ m.HugePagesFree = v
+ case "HugePages_Rsvd:":
+ m.HugePagesRsvd = v
+ case "HugePages_Surp:":
+ m.HugePagesSurp = v
+ case "Hugepagesize:":
+ m.Hugepagesize = v
+ case "DirectMap4k:":
+ m.DirectMap4k = v
+ case "DirectMap2M:":
+ m.DirectMap2M = v
+ case "DirectMap1G:":
+ m.DirectMap1G = v
+ }
+ }
+
+ return &m, nil
+}
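A small sketch of the new Meminfo accessor. The parser keeps the raw numbers from /proc/meminfo and discards the unit column, so values are in the kernel's units (kB for most fields):

    package example

    import (
        "fmt"

        "github.com/prometheus/procfs"
    )

    // memSummary reports total and available memory as parsed from /proc/meminfo.
    func memSummary(fs procfs.FS) (string, error) {
        m, err := fs.Meminfo()
        if err != nil {
            return "", err
        }
        return fmt.Sprintf("MemTotal=%d kB MemAvailable=%d kB", m.MemTotal, m.MemAvailable), nil
    }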
diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go
new file mode 100644
index 000000000..bb01bb5a2
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/mountinfo.go
@@ -0,0 +1,180 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// A MountInfo is a type that describes the details and options
+// for each mount, parsed from /proc/self/mountinfo.
+// The fields in each entry of /proc/self/mountinfo
+// are described in the following man page.
+// http://man7.org/linux/man-pages/man5/proc.5.html
+type MountInfo struct {
+ // Unique Id for the mount
+ MountId int
+ // The Id of the parent mount
+ ParentId int
+ // The value of `st_dev` for the files on this FS
+ MajorMinorVer string
+ // The pathname of the directory in the FS that forms
+ // the root for this mount
+ Root string
+ // The pathname of the mount point relative to the root
+ MountPoint string
+ // Mount options
+ Options map[string]string
+ // Zero or more optional fields
+ OptionalFields map[string]string
+ // The Filesystem type
+ FSType string
+ // FS specific information or "none"
+ Source string
+ // Superblock options
+ SuperOptions map[string]string
+}
+
+// Reads each line of the mountinfo file, and returns a list of formatted MountInfo structs.
+func parseMountInfo(info []byte) ([]*MountInfo, error) {
+ mounts := []*MountInfo{}
+ scanner := bufio.NewScanner(bytes.NewReader(info))
+ for scanner.Scan() {
+ mountString := scanner.Text()
+ parsedMounts, err := parseMountInfoString(mountString)
+ if err != nil {
+ return nil, err
+ }
+ mounts = append(mounts, parsedMounts)
+ }
+
+ err := scanner.Err()
+ return mounts, err
+}
+
+// Parses a mountinfo file line and converts it to a MountInfo struct.
+// An important check here is for the hyphen separator; if it is missing,
+// the line is malformed.
+func parseMountInfoString(mountString string) (*MountInfo, error) {
+ var err error
+
+ mountInfo := strings.Split(mountString, " ")
+ mountInfoLength := len(mountInfo)
+ if mountInfoLength < 11 {
+ return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString)
+ }
+
+ if mountInfo[mountInfoLength-4] != "-" {
+ return nil, fmt.Errorf("couldn't find separator in expected field: %s", mountInfo[mountInfoLength-4])
+ }
+
+ mount := &MountInfo{
+ MajorMinorVer: mountInfo[2],
+ Root: mountInfo[3],
+ MountPoint: mountInfo[4],
+ Options: mountOptionsParser(mountInfo[5]),
+ OptionalFields: nil,
+ FSType: mountInfo[mountInfoLength-3],
+ Source: mountInfo[mountInfoLength-2],
+ SuperOptions: mountOptionsParser(mountInfo[mountInfoLength-1]),
+ }
+
+ mount.MountId, err = strconv.Atoi(mountInfo[0])
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse mount ID")
+ }
+ mount.ParentId, err = strconv.Atoi(mountInfo[1])
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse parent ID")
+ }
+ // Has optional fields, which is a space separated list of values.
+ // Example: shared:2 master:7
+ if mountInfo[6] != "" {
+ mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4])
+ if err != nil {
+ return nil, err
+ }
+ }
+ return mount, nil
+}
+
+// mountOptionsIsValidField checks a string against a valid list of optional fields keys.
+func mountOptionsIsValidField(s string) bool {
+ switch s {
+ case
+ "shared",
+ "master",
+ "propagate_from",
+ "unbindable":
+ return true
+ }
+ return false
+}
+
+// mountOptionsParseOptionalFields parses a list of optional fields strings into a double map of strings.
+func mountOptionsParseOptionalFields(o []string) (map[string]string, error) {
+ optionalFields := make(map[string]string)
+ for _, field := range o {
+ optionSplit := strings.SplitN(field, ":", 2)
+ value := ""
+ if len(optionSplit) == 2 {
+ value = optionSplit[1]
+ }
+ if mountOptionsIsValidField(optionSplit[0]) {
+ optionalFields[optionSplit[0]] = value
+ }
+ }
+ return optionalFields, nil
+}
+
+// Parses the mount options and superblock options.
+func mountOptionsParser(mountOptions string) map[string]string {
+ opts := make(map[string]string)
+ options := strings.Split(mountOptions, ",")
+ for _, opt := range options {
+ splitOption := strings.Split(opt, "=")
+ if len(splitOption) < 2 {
+ key := splitOption[0]
+ opts[key] = ""
+ } else {
+ key, value := splitOption[0], splitOption[1]
+ opts[key] = value
+ }
+ }
+ return opts
+}
+
+// Retrieves mountinfo information from `/proc/self/mountinfo`.
+func GetMounts() ([]*MountInfo, error) {
+ data, err := util.ReadFileNoStat("/proc/self/mountinfo")
+ if err != nil {
+ return nil, err
+ }
+ return parseMountInfo(data)
+}
+
+// Retrieves mountinfo information from a process's `/proc/<pid>/mountinfo`.
+func GetProcMounts(pid int) ([]*MountInfo, error) {
+ data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/mountinfo", pid))
+ if err != nil {
+ return nil, err
+ }
+ return parseMountInfo(data)
+}
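GetMounts and GetProcMounts are package-level functions that read fixed /proc paths rather than going through FS. A hedged sketch of filtering the parsed mounts (the filesystem type chosen here is just an example):

    package example

    import (
        "github.com/prometheus/procfs"
    )

    // overlayMountPoints lists the mount points of overlay filesystems for the current process.
    func overlayMountPoints() ([]string, error) {
        mounts, err := procfs.GetMounts()
        if err != nil {
            return nil, err
        }
        var points []string
        for _, m := range mounts {
            if m.FSType == "overlay" {
                points = append(points, m.MountPoint)
            }
        }
        return points, nil
    }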
diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go
index 6b2b0ba9d..35b2ef351 100644
--- a/vendor/github.com/prometheus/procfs/mountstats.go
+++ b/vendor/github.com/prometheus/procfs/mountstats.go
@@ -1,3 +1,16 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package procfs
// While implementing parsing of /proc/[pid]/mountstats, this blog was used
@@ -26,8 +39,11 @@ const (
statVersion10 = "1.0"
statVersion11 = "1.1"
- fieldTransport10Len = 10
- fieldTransport11Len = 13
+ fieldTransport10TCPLen = 10
+ fieldTransport10UDPLen = 7
+
+ fieldTransport11TCPLen = 13
+ fieldTransport11UDPLen = 10
)
// A Mount is a device mount parsed from /proc/[pid]/mountstats.
@@ -53,6 +69,8 @@ type MountStats interface {
type MountStatsNFS struct {
// The version of statistics provided.
StatVersion string
+ // The mount options of the NFS mount.
+ Opts map[string]string
// The age of the NFS mount.
Age time.Duration
// Statistics related to byte counters for various operations.
@@ -163,16 +181,18 @@ type NFSOperationStats struct {
// Number of bytes received for this operation, including RPC headers and payload.
BytesReceived uint64
// Duration all requests spent queued for transmission before they were sent.
- CumulativeQueueTime time.Duration
+ CumulativeQueueMilliseconds uint64
// Duration it took to get a reply back after the request was transmitted.
- CumulativeTotalResponseTime time.Duration
+ CumulativeTotalResponseMilliseconds uint64
// Duration from when a request was enqueued to when it was completely handled.
- CumulativeTotalRequestTime time.Duration
+ CumulativeTotalRequestMilliseconds uint64
}
// A NFSTransportStats contains statistics for the NFS mount RPC requests and
// responses.
type NFSTransportStats struct {
+ // The transport protocol used for the NFS mount.
+ Protocol string
// The local port used for the NFS mount.
Port uint64
// Number of times the client has had to establish a connection from scratch
@@ -184,7 +204,7 @@ type NFSTransportStats struct {
// spent waiting for connections to the server to be established.
ConnectIdleTime uint64
// Duration since the NFS mount last saw any RPC traffic.
- IdleTime time.Duration
+ IdleTimeSeconds uint64
// Number of RPC requests for this mount sent to the NFS server.
Sends uint64
// Number of RPC responses for this mount received from the NFS server.
@@ -299,6 +319,7 @@ func parseMount(ss []string) (*Mount, error) {
func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
// Field indicators for parsing specific types of data
const (
+ fieldOpts = "opts:"
fieldAge = "age:"
fieldBytes = "bytes:"
fieldEvents = "events:"
@@ -320,6 +341,18 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
}
switch ss[0] {
+ case fieldOpts:
+ if stats.Opts == nil {
+ stats.Opts = map[string]string{}
+ }
+ for _, opt := range strings.Split(ss[1], ",") {
+ split := strings.Split(opt, "=")
+ if len(split) == 2 {
+ stats.Opts[split[0]] = split[1]
+ } else {
+ stats.Opts[opt] = ""
+ }
+ }
case fieldAge:
// Age integer is in seconds
d, err := time.ParseDuration(ss[1] + "s")
@@ -347,7 +380,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss)
}
- tstats, err := parseNFSTransportStats(ss[2:], statVersion)
+ tstats, err := parseNFSTransportStats(ss[1:], statVersion)
if err != nil {
return nil, err
}
@@ -491,15 +524,15 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
}
ops = append(ops, NFSOperationStats{
- Operation: strings.TrimSuffix(ss[0], ":"),
- Requests: ns[0],
- Transmissions: ns[1],
- MajorTimeouts: ns[2],
- BytesSent: ns[3],
- BytesReceived: ns[4],
- CumulativeQueueTime: time.Duration(ns[5]) * time.Millisecond,
- CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond,
- CumulativeTotalRequestTime: time.Duration(ns[7]) * time.Millisecond,
+ Operation: strings.TrimSuffix(ss[0], ":"),
+ Requests: ns[0],
+ Transmissions: ns[1],
+ MajorTimeouts: ns[2],
+ BytesSent: ns[3],
+ BytesReceived: ns[4],
+ CumulativeQueueMilliseconds: ns[5],
+ CumulativeTotalResponseMilliseconds: ns[6],
+ CumulativeTotalRequestMilliseconds: ns[7],
})
}
@@ -509,13 +542,33 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
// parseNFSTransportStats parses a NFSTransportStats line using an input set of
// integer fields matched to a specific stats version.
func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) {
+ // Extract the protocol field. It is the only string value in the line
+ protocol := ss[0]
+ ss = ss[1:]
+
switch statVersion {
case statVersion10:
- if len(ss) != fieldTransport10Len {
+ var expectedLength int
+ if protocol == "tcp" {
+ expectedLength = fieldTransport10TCPLen
+ } else if protocol == "udp" {
+ expectedLength = fieldTransport10UDPLen
+ } else {
+ return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss)
+ }
+ if len(ss) != expectedLength {
return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss)
}
case statVersion11:
- if len(ss) != fieldTransport11Len {
+ var expectedLength int
+ if protocol == "tcp" {
+ expectedLength = fieldTransport11TCPLen
+ } else if protocol == "udp" {
+ expectedLength = fieldTransport11UDPLen
+ } else {
+ return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss)
+ }
+ if len(ss) != expectedLength {
return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss)
}
default:
@@ -523,12 +576,13 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
}
// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
- // in a v1.0 response.
+ // in a v1.0 response. Since the stat length is bigger for TCP stats, we use
+ // the TCP length here.
//
// Note: slice length must be set to length of v1.1 stats to avoid a panic when
// only v1.0 stats are present.
// See: https://github.com/prometheus/node_exporter/issues/571.
- ns := make([]uint64, fieldTransport11Len)
+ ns := make([]uint64, fieldTransport11TCPLen)
for i, s := range ss {
n, err := strconv.ParseUint(s, 10, 64)
if err != nil {
@@ -538,12 +592,23 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
ns[i] = n
}
+ // The fields differ depending on the transport protocol (TCP or UDP)
+ // From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt
+ //
+ // For the udp RPC transport there is no connection count, connect idle time,
+ // or idle time (fields #3, #4, and #5); all other fields are the same. So
+ // we set them to 0 here.
+ if protocol == "udp" {
+ ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
+ }
+
return &NFSTransportStats{
+ Protocol: protocol,
Port: ns[0],
Bind: ns[1],
Connect: ns[2],
ConnectIdleTime: ns[3],
- IdleTime: time.Duration(ns[4]) * time.Second,
+ IdleTimeSeconds: ns[4],
Sends: ns[5],
Receives: ns[6],
BadTransactionIDs: ns[7],
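Several duration-typed fields become raw counters here (IdleTime becomes IdleTimeSeconds, the cumulative operation times become millisecond counts), so callers that want time.Duration values must convert explicitly. A minimal sketch of those conversions:

    package example

    import (
        "time"

        "github.com/prometheus/procfs"
    )

    // transportIdle converts the transport idle-time counter back to a Duration.
    func transportIdle(t procfs.NFSTransportStats) time.Duration {
        return time.Duration(t.IdleTimeSeconds) * time.Second
    }

    // queueTime converts an operation's cumulative queue counter to a Duration.
    func queueTime(op procfs.NFSOperationStats) time.Duration {
        return time.Duration(op.CumulativeQueueMilliseconds) * time.Millisecond
    }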
diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go
index f8c184efe..47a710bef 100644
--- a/vendor/github.com/prometheus/procfs/net_dev.go
+++ b/vendor/github.com/prometheus/procfs/net_dev.go
@@ -1,3 +1,16 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package procfs
import (
@@ -34,23 +47,13 @@ type NetDevLine struct {
// are interface names.
type NetDev map[string]NetDevLine
-// NewNetDev returns kernel/system statistics read from /proc/net/dev.
-func NewNetDev() (NetDev, error) {
- fs, err := NewFS(DefaultMountPoint)
- if err != nil {
- return nil, err
- }
-
- return fs.NewNetDev()
-}
-
-// NewNetDev returns kernel/system statistics read from /proc/net/dev.
-func (fs FS) NewNetDev() (NetDev, error) {
- return newNetDev(fs.Path("net/dev"))
+// NetDev returns kernel/system statistics read from /proc/net/dev.
+func (fs FS) NetDev() (NetDev, error) {
+ return newNetDev(fs.proc.Path("net/dev"))
}
-// NewNetDev returns kernel/system statistics read from /proc/[pid]/net/dev.
-func (p Proc) NewNetDev() (NetDev, error) {
+// NetDev returns kernel/system statistics read from /proc/[pid]/net/dev.
+func (p Proc) NetDev() (NetDev, error) {
return newNetDev(p.path("net/dev"))
}
@@ -62,7 +65,7 @@ func newNetDev(file string) (NetDev, error) {
}
defer f.Close()
- nd := NetDev{}
+ netDev := NetDev{}
s := bufio.NewScanner(f)
for n := 0; s.Scan(); n++ {
// Skip the 2 header lines.
@@ -70,20 +73,20 @@ func newNetDev(file string) (NetDev, error) {
continue
}
- line, err := nd.parseLine(s.Text())
+ line, err := netDev.parseLine(s.Text())
if err != nil {
- return nd, err
+ return netDev, err
}
- nd[line.Name] = *line
+ netDev[line.Name] = *line
}
- return nd, s.Err()
+ return netDev, s.Err()
}
// parseLine parses a single line from the /proc/net/dev file. Header lines
// must be filtered prior to calling this method.
-func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) {
+func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) {
parts := strings.SplitN(rawLine, ":", 2)
if len(parts) != 2 {
return nil, errors.New("invalid net/dev line, missing colon")
@@ -171,16 +174,15 @@ func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) {
}
// Total aggregates the values across interfaces and returns a new NetDevLine.
-// The Name field will be a sorted comma seperated list of interface names.
-func (nd NetDev) Total() NetDevLine {
+// The Name field will be a sorted comma separated list of interface names.
+func (netDev NetDev) Total() NetDevLine {
total := NetDevLine{}
- names := make([]string, 0, len(nd))
- for _, ifc := range nd {
+ names := make([]string, 0, len(netDev))
+ for _, ifc := range netDev {
names = append(names, ifc.Name)
total.RxBytes += ifc.RxBytes
total.RxPackets += ifc.RxPackets
- total.RxPackets += ifc.RxPackets
total.RxErrors += ifc.RxErrors
total.RxDropped += ifc.RxDropped
total.RxFIFO += ifc.RxFIFO
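A minimal usage sketch of the renamed NetDev API, assuming the package is imported from its upstream path (github.com/prometheus/procfs):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint) // usually "/proc"
	if err != nil {
		log.Fatal(err)
	}
	netDev, err := fs.NetDev() // one NetDevLine per interface
	if err != nil {
		log.Fatal(err)
	}
	total := netDev.Total() // aggregate across all interfaces
	fmt.Printf("rx_bytes=%d rx_packets=%d\n", total.RxBytes, total.RxPackets)
}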
diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go
new file mode 100644
index 000000000..f91ef5523
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_sockstat.go
@@ -0,0 +1,163 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// A NetSockstat contains the output of /proc/net/sockstat{,6} for IPv4 or IPv6,
+// respectively.
+type NetSockstat struct {
+ // Used is non-nil for IPv4 sockstat results, but nil for IPv6.
+ Used *int
+ Protocols []NetSockstatProtocol
+}
+
+// A NetSockstatProtocol contains statistics about a given socket protocol.
+// Pointer fields indicate that the value may or may not be present on any
+// given protocol.
+type NetSockstatProtocol struct {
+ Protocol string
+ InUse int
+ Orphan *int
+ TW *int
+ Alloc *int
+ Mem *int
+ Memory *int
+}
+
+// NetSockstat retrieves IPv4 socket statistics.
+func (fs FS) NetSockstat() (*NetSockstat, error) {
+ return readSockstat(fs.proc.Path("net", "sockstat"))
+}
+
+// NetSockstat6 retrieves IPv6 socket statistics.
+//
+// If IPv6 is disabled on this kernel, the returned error can be checked with
+// os.IsNotExist.
+func (fs FS) NetSockstat6() (*NetSockstat, error) {
+ return readSockstat(fs.proc.Path("net", "sockstat6"))
+}
+
+// readSockstat opens and parses a NetSockstat from the input file.
+func readSockstat(name string) (*NetSockstat, error) {
+ // This file is small and can be read with one syscall.
+ b, err := util.ReadFileNoStat(name)
+ if err != nil {
+ // Do not wrap this error so the caller can detect os.IsNotExist and
+ // similar conditions.
+ return nil, err
+ }
+
+ stat, err := parseSockstat(bytes.NewReader(b))
+ if err != nil {
+ return nil, fmt.Errorf("failed to read sockstats from %q: %v", name, err)
+ }
+
+ return stat, nil
+}
+
+// parseSockstat reads the contents of a sockstat file and parses a NetSockstat.
+func parseSockstat(r io.Reader) (*NetSockstat, error) {
+ var stat NetSockstat
+ s := bufio.NewScanner(r)
+ for s.Scan() {
+ // Expect a minimum of a protocol and one key/value pair.
+ fields := strings.Split(s.Text(), " ")
+ if len(fields) < 3 {
+ return nil, fmt.Errorf("malformed sockstat line: %q", s.Text())
+ }
+
+ // The remaining fields are key/value pairs.
+ kvs, err := parseSockstatKVs(fields[1:])
+ if err != nil {
+ return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %v", s.Text(), err)
+ }
+
+ // The first field is the protocol. We must trim its colon suffix.
+ proto := strings.TrimSuffix(fields[0], ":")
+ switch proto {
+ case "sockets":
+ // Special case: IPv4 has a sockets "used" key/value pair that we
+ // embed at the top level of the structure.
+ used := kvs["used"]
+ stat.Used = &used
+ default:
+ // Parse all other lines as individual protocols.
+ nsp := parseSockstatProtocol(kvs)
+ nsp.Protocol = proto
+ stat.Protocols = append(stat.Protocols, nsp)
+ }
+ }
+
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ return &stat, nil
+}
+
+// parseSockstatKVs parses a string slice into a map of key/value pairs.
+func parseSockstatKVs(kvs []string) (map[string]int, error) {
+ if len(kvs)%2 != 0 {
+ return nil, errors.New("odd number of fields in key/value pairs")
+ }
+
+ // Iterate two values at a time to gather key/value pairs.
+ out := make(map[string]int, len(kvs)/2)
+ for i := 0; i < len(kvs); i += 2 {
+ vp := util.NewValueParser(kvs[i+1])
+ out[kvs[i]] = vp.Int()
+
+ if err := vp.Err(); err != nil {
+ return nil, err
+ }
+ }
+
+ return out, nil
+}
+
+// parseSockstatProtocol parses a NetSockstatProtocol from the input kvs map.
+func parseSockstatProtocol(kvs map[string]int) NetSockstatProtocol {
+ var nsp NetSockstatProtocol
+ for k, v := range kvs {
+ // Capture the range variable to ensure we get unique pointers for
+ // each of the optional fields.
+ v := v
+ switch k {
+ case "inuse":
+ nsp.InUse = v
+ case "orphan":
+ nsp.Orphan = &v
+ case "tw":
+ nsp.TW = &v
+ case "alloc":
+ nsp.Alloc = &v
+ case "mem":
+ nsp.Mem = &v
+ case "memory":
+ nsp.Memory = &v
+ }
+ }
+
+ return nsp
+}
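A short sketch of reading the new sockstat API; the optional fields mirror what the parser above exposes:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	stat, err := fs.NetSockstat() // IPv4; fs.NetSockstat6 covers IPv6
	if err != nil {
		log.Fatal(err)
	}
	if stat.Used != nil { // only present for IPv4
		fmt.Println("sockets used:", *stat.Used)
	}
	for _, p := range stat.Protocols {
		fmt.Printf("%s: inuse=%d\n", p.Protocol, p.InUse)
	}
}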
diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go
new file mode 100644
index 000000000..6fcad20af
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_softnet.go
@@ -0,0 +1,91 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "strconv"
+ "strings"
+)
+
+// For the proc file format details,
+// see https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162
+// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810.
+
+// SoftnetEntry contains a single row of data from /proc/net/softnet_stat
+type SoftnetEntry struct {
+ // Number of processed packets
+ Processed uint
+ // Number of dropped packets
+ Dropped uint
+ // Number of times processing packets ran out of quota
+ TimeSqueezed uint
+}
+
+// GatherSoftnetStats reads /proc/net/softnet_stat, parses the relevant columns,
+// and returns a slice of SoftnetEntry values.
+func (fs FS) GatherSoftnetStats() ([]SoftnetEntry, error) {
+ data, err := ioutil.ReadFile(fs.proc.Path("net/softnet_stat"))
+ if err != nil {
+ return nil, fmt.Errorf("error reading softnet %s: %s", fs.proc.Path("net/softnet_stat"), err)
+ }
+
+ return parseSoftnetEntries(data)
+}
+
+func parseSoftnetEntries(data []byte) ([]SoftnetEntry, error) {
+ lines := strings.Split(string(data), "\n")
+ entries := make([]SoftnetEntry, 0)
+ var err error
+ const (
+ expectedColumns = 11
+ )
+ for _, line := range lines {
+ columns := strings.Fields(line)
+ width := len(columns)
+ if width == 0 {
+ continue
+ }
+ if width != expectedColumns {
+ return []SoftnetEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedColumns)
+ }
+ var entry SoftnetEntry
+ if entry, err = parseSoftnetEntry(columns); err != nil {
+ return []SoftnetEntry{}, err
+ }
+ entries = append(entries, entry)
+ }
+
+ return entries, nil
+}
+
+func parseSoftnetEntry(columns []string) (SoftnetEntry, error) {
+ var err error
+ var processed, dropped, timeSqueezed uint64
+ if processed, err = strconv.ParseUint(columns[0], 16, 32); err != nil {
+ return SoftnetEntry{}, fmt.Errorf("Unable to parse column 0: %s", err)
+ }
+ if dropped, err = strconv.ParseUint(columns[1], 16, 32); err != nil {
+ return SoftnetEntry{}, fmt.Errorf("Unable to parse column 1: %s", err)
+ }
+ if timeSqueezed, err = strconv.ParseUint(columns[2], 16, 32); err != nil {
+ return SoftnetEntry{}, fmt.Errorf("Unable to parse column 2: %s", err)
+ }
+ return SoftnetEntry{
+ Processed: uint(processed),
+ Dropped: uint(dropped),
+ TimeSqueezed: uint(timeSqueezed),
+ }, nil
+}
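Roughly, the softnet reader added above can be used like this (one entry per CPU row of softnet_stat; a sketch only):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	entries, err := fs.GatherSoftnetStats() // one SoftnetEntry per CPU row
	if err != nil {
		log.Fatal(err)
	}
	for cpu, e := range entries {
		fmt.Printf("cpu%d: processed=%d dropped=%d squeezed=%d\n", cpu, e.Processed, e.Dropped, e.TimeSqueezed)
	}
}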
diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go
new file mode 100644
index 000000000..93bd58f80
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_unix.go
@@ -0,0 +1,271 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// For the proc file format details,
+// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815
+// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48.
+
+const (
+ netUnixKernelPtrIdx = iota
+ netUnixRefCountIdx
+ _
+ netUnixFlagsIdx
+ netUnixTypeIdx
+ netUnixStateIdx
+ netUnixInodeIdx
+
+ // Inode and Path are optional.
+ netUnixStaticFieldsCnt = 6
+)
+
+const (
+ netUnixTypeStream = 1
+ netUnixTypeDgram = 2
+ netUnixTypeSeqpacket = 5
+
+ netUnixFlagListen = 1 << 16
+
+ netUnixStateUnconnected = 1
+ netUnixStateConnecting = 2
+ netUnixStateConnected = 3
+ netUnixStateDisconnected = 4
+)
+
+var errInvalidKernelPtrFmt = errors.New("Invalid Num(the kernel table slot number) format")
+
+// NetUnixType is the type of the type field.
+type NetUnixType uint64
+
+// NetUnixFlags is the type of the flags field.
+type NetUnixFlags uint64
+
+// NetUnixState is the type of the state field.
+type NetUnixState uint64
+
+// NetUnixLine represents a line of /proc/net/unix.
+type NetUnixLine struct {
+ KernelPtr string
+ RefCount uint64
+ Protocol uint64
+ Flags NetUnixFlags
+ Type NetUnixType
+ State NetUnixState
+ Inode uint64
+ Path string
+}
+
+// NetUnix holds the data read from /proc/net/unix.
+type NetUnix struct {
+ Rows []*NetUnixLine
+}
+
+// NewNetUnix returns data read from /proc/net/unix.
+func NewNetUnix() (*NetUnix, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return nil, err
+ }
+
+ return fs.NewNetUnix()
+}
+
+// NewNetUnix returns data read from /proc/net/unix.
+func (fs FS) NewNetUnix() (*NetUnix, error) {
+ return NewNetUnixByPath(fs.proc.Path("net/unix"))
+}
+
+// NewNetUnixByPath returns data read from /proc/net/unix by file path.
+// It may return an error with partially parsed data if an error occurs after some data has been parsed.
+func NewNetUnixByPath(path string) (*NetUnix, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ return NewNetUnixByReader(f)
+}
+
+// NewNetUnixByReader returns data read from /proc/net/unix by a reader.
+// It may return an error with partially parsed data if an error occurs after some data has been parsed.
+func NewNetUnixByReader(reader io.Reader) (*NetUnix, error) {
+ nu := &NetUnix{
+ Rows: make([]*NetUnixLine, 0, 32),
+ }
+ scanner := bufio.NewScanner(reader)
+ // Omit the header line.
+ scanner.Scan()
+ header := scanner.Text()
+ // The proc(5) man page does not document an Inode field, but in
+ // practice it is present.
+ // This code works for both cases.
+ hasInode := strings.Contains(header, "Inode")
+
+ minFieldsCnt := netUnixStaticFieldsCnt
+ if hasInode {
+ minFieldsCnt++
+ }
+ for scanner.Scan() {
+ line := scanner.Text()
+ item, err := nu.parseLine(line, hasInode, minFieldsCnt)
+ if err != nil {
+ return nu, err
+ }
+ nu.Rows = append(nu.Rows, item)
+ }
+
+ return nu, scanner.Err()
+}
+
+func (u *NetUnix) parseLine(line string, hasInode bool, minFieldsCnt int) (*NetUnixLine, error) {
+ fields := strings.Fields(line)
+ fieldsLen := len(fields)
+ if fieldsLen < minFieldsCnt {
+ return nil, fmt.Errorf(
+ "Parse Unix domain failed: expect at least %d fields but got %d",
+ minFieldsCnt, fieldsLen)
+ }
+ kernelPtr, err := u.parseKernelPtr(fields[netUnixKernelPtrIdx])
+ if err != nil {
+ return nil, fmt.Errorf("Parse Unix domain num(%s) failed: %s", fields[netUnixKernelPtrIdx], err)
+ }
+ users, err := u.parseUsers(fields[netUnixRefCountIdx])
+ if err != nil {
+ return nil, fmt.Errorf("Parse Unix domain ref count(%s) failed: %s", fields[netUnixRefCountIdx], err)
+ }
+ flags, err := u.parseFlags(fields[netUnixFlagsIdx])
+ if err != nil {
+ return nil, fmt.Errorf("Parse Unix domain flags(%s) failed: %s", fields[netUnixFlagsIdx], err)
+ }
+ typ, err := u.parseType(fields[netUnixTypeIdx])
+ if err != nil {
+ return nil, fmt.Errorf("Parse Unix domain type(%s) failed: %s", fields[netUnixTypeIdx], err)
+ }
+ state, err := u.parseState(fields[netUnixStateIdx])
+ if err != nil {
+ return nil, fmt.Errorf("Parse Unix domain state(%s) failed: %s", fields[netUnixStateIdx], err)
+ }
+ var inode uint64
+ if hasInode {
+ inodeStr := fields[netUnixInodeIdx]
+ inode, err = u.parseInode(inodeStr)
+ if err != nil {
+ return nil, fmt.Errorf("Parse Unix domain inode(%s) failed: %s", inodeStr, err)
+ }
+ }
+
+ nuLine := &NetUnixLine{
+ KernelPtr: kernelPtr,
+ RefCount: users,
+ Type: typ,
+ Flags: flags,
+ State: state,
+ Inode: inode,
+ }
+
+ // Path field is optional.
+ if fieldsLen > minFieldsCnt {
+ pathIdx := netUnixInodeIdx + 1
+ if !hasInode {
+ pathIdx--
+ }
+ nuLine.Path = fields[pathIdx]
+ }
+
+ return nuLine, nil
+}
+
+func (u NetUnix) parseKernelPtr(str string) (string, error) {
+ if !strings.HasSuffix(str, ":") {
+ return "", errInvalidKernelPtrFmt
+ }
+ return str[:len(str)-1], nil
+}
+
+func (u NetUnix) parseUsers(hexStr string) (uint64, error) {
+ return strconv.ParseUint(hexStr, 16, 32)
+}
+
+func (u NetUnix) parseType(hexStr string) (NetUnixType, error) {
+ typ, err := strconv.ParseUint(hexStr, 16, 16)
+ if err != nil {
+ return 0, err
+ }
+ return NetUnixType(typ), nil
+}
+
+func (u NetUnix) parseFlags(hexStr string) (NetUnixFlags, error) {
+ flags, err := strconv.ParseUint(hexStr, 16, 32)
+ if err != nil {
+ return 0, err
+ }
+ return NetUnixFlags(flags), nil
+}
+
+func (u NetUnix) parseState(hexStr string) (NetUnixState, error) {
+ st, err := strconv.ParseInt(hexStr, 16, 8)
+ if err != nil {
+ return 0, err
+ }
+ return NetUnixState(st), nil
+}
+
+func (u NetUnix) parseInode(inodeStr string) (uint64, error) {
+ return strconv.ParseUint(inodeStr, 10, 64)
+}
+
+func (t NetUnixType) String() string {
+ switch t {
+ case netUnixTypeStream:
+ return "stream"
+ case netUnixTypeDgram:
+ return "dgram"
+ case netUnixTypeSeqpacket:
+ return "seqpacket"
+ }
+ return "unknown"
+}
+
+func (f NetUnixFlags) String() string {
+ switch f {
+ case netUnixFlagListen:
+ return "listen"
+ default:
+ return "default"
+ }
+}
+
+func (s NetUnixState) String() string {
+ switch s {
+ case netUnixStateUnconnected:
+ return "unconnected"
+ case netUnixStateConnecting:
+ return "connecting"
+ case netUnixStateConnected:
+ return "connected"
+ case netUnixStateDisconnected:
+ return "disconnected"
+ }
+ return "unknown"
+}
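A small sketch of walking /proc/net/unix through the API above; the Stringer methods make the raw hex fields readable:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	nu, err := procfs.NewNetUnix() // reads /proc/net/unix under the default mount point
	if err != nil {
		log.Fatal(err)
	}
	for _, row := range nu.Rows {
		// NetUnixType, NetUnixState and NetUnixFlags all implement fmt.Stringer.
		fmt.Printf("%s %s %s %q\n", row.Type, row.State, row.Flags, row.Path)
	}
}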
diff --git a/vendor/github.com/prometheus/procfs/nfs/nfs.go b/vendor/github.com/prometheus/procfs/nfs/nfs.go
deleted file mode 100644
index e2185b782..000000000
--- a/vendor/github.com/prometheus/procfs/nfs/nfs.go
+++ /dev/null
@@ -1,263 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package nfsd implements parsing of /proc/net/rpc/nfsd.
-// Fields are documented in https://www.svennd.be/nfsd-stats-explained-procnetrpcnfsd/
-package nfs
-
-// ReplyCache models the "rc" line.
-type ReplyCache struct {
- Hits uint64
- Misses uint64
- NoCache uint64
-}
-
-// FileHandles models the "fh" line.
-type FileHandles struct {
- Stale uint64
- TotalLookups uint64
- AnonLookups uint64
- DirNoCache uint64
- NoDirNoCache uint64
-}
-
-// InputOutput models the "io" line.
-type InputOutput struct {
- Read uint64
- Write uint64
-}
-
-// Threads models the "th" line.
-type Threads struct {
- Threads uint64
- FullCnt uint64
-}
-
-// ReadAheadCache models the "ra" line.
-type ReadAheadCache struct {
- CacheSize uint64
- CacheHistogram []uint64
- NotFound uint64
-}
-
-// Network models the "net" line.
-type Network struct {
- NetCount uint64
- UDPCount uint64
- TCPCount uint64
- TCPConnect uint64
-}
-
-// ClientRPC models the nfs "rpc" line.
-type ClientRPC struct {
- RPCCount uint64
- Retransmissions uint64
- AuthRefreshes uint64
-}
-
-// ServerRPC models the nfsd "rpc" line.
-type ServerRPC struct {
- RPCCount uint64
- BadCnt uint64
- BadFmt uint64
- BadAuth uint64
- BadcInt uint64
-}
-
-// V2Stats models the "proc2" line.
-type V2Stats struct {
- Null uint64
- GetAttr uint64
- SetAttr uint64
- Root uint64
- Lookup uint64
- ReadLink uint64
- Read uint64
- WrCache uint64
- Write uint64
- Create uint64
- Remove uint64
- Rename uint64
- Link uint64
- SymLink uint64
- MkDir uint64
- RmDir uint64
- ReadDir uint64
- FsStat uint64
-}
-
-// V3Stats models the "proc3" line.
-type V3Stats struct {
- Null uint64
- GetAttr uint64
- SetAttr uint64
- Lookup uint64
- Access uint64
- ReadLink uint64
- Read uint64
- Write uint64
- Create uint64
- MkDir uint64
- SymLink uint64
- MkNod uint64
- Remove uint64
- RmDir uint64
- Rename uint64
- Link uint64
- ReadDir uint64
- ReadDirPlus uint64
- FsStat uint64
- FsInfo uint64
- PathConf uint64
- Commit uint64
-}
-
-// ClientV4Stats models the nfs "proc4" line.
-type ClientV4Stats struct {
- Null uint64
- Read uint64
- Write uint64
- Commit uint64
- Open uint64
- OpenConfirm uint64
- OpenNoattr uint64
- OpenDowngrade uint64
- Close uint64
- Setattr uint64
- FsInfo uint64
- Renew uint64
- SetClientId uint64
- SetClientIdConfirm uint64
- Lock uint64
- Lockt uint64
- Locku uint64
- Access uint64
- Getattr uint64
- Lookup uint64
- LookupRoot uint64
- Remove uint64
- Rename uint64
- Link uint64
- Symlink uint64
- Create uint64
- Pathconf uint64
- StatFs uint64
- ReadLink uint64
- ReadDir uint64
- ServerCaps uint64
- DelegReturn uint64
- GetAcl uint64
- SetAcl uint64
- FsLocations uint64
- ReleaseLockowner uint64
- Secinfo uint64
- FsidPresent uint64
- ExchangeId uint64
- CreateSession uint64
- DestroySession uint64
- Sequence uint64
- GetLeaseTime uint64
- ReclaimComplete uint64
- LayoutGet uint64
- GetDeviceInfo uint64
- LayoutCommit uint64
- LayoutReturn uint64
- SecinfoNoName uint64
- TestStateId uint64
- FreeStateId uint64
- GetDeviceList uint64
- BindConnToSession uint64
- DestroyClientId uint64
- Seek uint64
- Allocate uint64
- DeAllocate uint64
- LayoutStats uint64
- Clone uint64
-}
-
-// ServerV4Stats models the nfsd "proc4" line.
-type ServerV4Stats struct {
- Null uint64
- Compound uint64
-}
-
-// V4Ops models the "proc4ops" line: NFSv4 operations
-// Variable list, see:
-// v4.0 https://tools.ietf.org/html/rfc3010 (38 operations)
-// v4.1 https://tools.ietf.org/html/rfc5661 (58 operations)
-// v4.2 https://tools.ietf.org/html/draft-ietf-nfsv4-minorversion2-41 (71 operations)
-type V4Ops struct {
- //Values uint64 // Variable depending on v4.x sub-version. TODO: Will this always at least include the fields in this struct?
- Op0Unused uint64
- Op1Unused uint64
- Op2Future uint64
- Access uint64
- Close uint64
- Commit uint64
- Create uint64
- DelegPurge uint64
- DelegReturn uint64
- GetAttr uint64
- GetFH uint64
- Link uint64
- Lock uint64
- Lockt uint64
- Locku uint64
- Lookup uint64
- LookupRoot uint64
- Nverify uint64
- Open uint64
- OpenAttr uint64
- OpenConfirm uint64
- OpenDgrd uint64
- PutFH uint64
- PutPubFH uint64
- PutRootFH uint64
- Read uint64
- ReadDir uint64
- ReadLink uint64
- Remove uint64
- Rename uint64
- Renew uint64
- RestoreFH uint64
- SaveFH uint64
- SecInfo uint64
- SetAttr uint64
- Verify uint64
- Write uint64
- RelLockOwner uint64
-}
-
-// RPCStats models all stats from /proc/net/rpc/nfs.
-type ClientRPCStats struct {
- Network Network
- ClientRPC ClientRPC
- V2Stats V2Stats
- V3Stats V3Stats
- ClientV4Stats ClientV4Stats
-}
-
-// ServerRPCStats models all stats from /proc/net/rpc/nfsd.
-type ServerRPCStats struct {
- ReplyCache ReplyCache
- FileHandles FileHandles
- InputOutput InputOutput
- Threads Threads
- ReadAheadCache ReadAheadCache
- Network Network
- ServerRPC ServerRPC
- V2Stats V2Stats
- V3Stats V3Stats
- ServerV4Stats ServerV4Stats
- V4Ops V4Ops
-}
diff --git a/vendor/github.com/prometheus/procfs/nfs/parse.go b/vendor/github.com/prometheus/procfs/nfs/parse.go
deleted file mode 100644
index 3aa32563a..000000000
--- a/vendor/github.com/prometheus/procfs/nfs/parse.go
+++ /dev/null
@@ -1,308 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package nfs
-
-import (
- "fmt"
-)
-
-func parseReplyCache(v []uint64) (ReplyCache, error) {
- if len(v) != 3 {
- return ReplyCache{}, fmt.Errorf("invalid ReplyCache line %q", v)
- }
-
- return ReplyCache{
- Hits: v[0],
- Misses: v[1],
- NoCache: v[2],
- }, nil
-}
-
-func parseFileHandles(v []uint64) (FileHandles, error) {
- if len(v) != 5 {
- return FileHandles{}, fmt.Errorf("invalid FileHandles, line %q", v)
- }
-
- return FileHandles{
- Stale: v[0],
- TotalLookups: v[1],
- AnonLookups: v[2],
- DirNoCache: v[3],
- NoDirNoCache: v[4],
- }, nil
-}
-
-func parseInputOutput(v []uint64) (InputOutput, error) {
- if len(v) != 2 {
- return InputOutput{}, fmt.Errorf("invalid InputOutput line %q", v)
- }
-
- return InputOutput{
- Read: v[0],
- Write: v[1],
- }, nil
-}
-
-func parseThreads(v []uint64) (Threads, error) {
- if len(v) != 2 {
- return Threads{}, fmt.Errorf("invalid Threads line %q", v)
- }
-
- return Threads{
- Threads: v[0],
- FullCnt: v[1],
- }, nil
-}
-
-func parseReadAheadCache(v []uint64) (ReadAheadCache, error) {
- if len(v) != 12 {
- return ReadAheadCache{}, fmt.Errorf("invalid ReadAheadCache line %q", v)
- }
-
- return ReadAheadCache{
- CacheSize: v[0],
- CacheHistogram: v[1:11],
- NotFound: v[11],
- }, nil
-}
-
-func parseNetwork(v []uint64) (Network, error) {
- if len(v) != 4 {
- return Network{}, fmt.Errorf("invalid Network line %q", v)
- }
-
- return Network{
- NetCount: v[0],
- UDPCount: v[1],
- TCPCount: v[2],
- TCPConnect: v[3],
- }, nil
-}
-
-func parseServerRPC(v []uint64) (ServerRPC, error) {
- if len(v) != 5 {
- return ServerRPC{}, fmt.Errorf("invalid RPC line %q", v)
- }
-
- return ServerRPC{
- RPCCount: v[0],
- BadCnt: v[1],
- BadFmt: v[2],
- BadAuth: v[3],
- BadcInt: v[4],
- }, nil
-}
-
-func parseClientRPC(v []uint64) (ClientRPC, error) {
- if len(v) != 3 {
- return ClientRPC{}, fmt.Errorf("invalid RPC line %q", v)
- }
-
- return ClientRPC{
- RPCCount: v[0],
- Retransmissions: v[1],
- AuthRefreshes: v[2],
- }, nil
-}
-
-func parseV2Stats(v []uint64) (V2Stats, error) {
- values := int(v[0])
- if len(v[1:]) != values || values != 18 {
- return V2Stats{}, fmt.Errorf("invalid V2Stats line %q", v)
- }
-
- return V2Stats{
- Null: v[1],
- GetAttr: v[2],
- SetAttr: v[3],
- Root: v[4],
- Lookup: v[5],
- ReadLink: v[6],
- Read: v[7],
- WrCache: v[8],
- Write: v[9],
- Create: v[10],
- Remove: v[11],
- Rename: v[12],
- Link: v[13],
- SymLink: v[14],
- MkDir: v[15],
- RmDir: v[16],
- ReadDir: v[17],
- FsStat: v[18],
- }, nil
-}
-
-func parseV3Stats(v []uint64) (V3Stats, error) {
- values := int(v[0])
- if len(v[1:]) != values || values != 22 {
- return V3Stats{}, fmt.Errorf("invalid V3Stats line %q", v)
- }
-
- return V3Stats{
- Null: v[1],
- GetAttr: v[2],
- SetAttr: v[3],
- Lookup: v[4],
- Access: v[5],
- ReadLink: v[6],
- Read: v[7],
- Write: v[8],
- Create: v[9],
- MkDir: v[10],
- SymLink: v[11],
- MkNod: v[12],
- Remove: v[13],
- RmDir: v[14],
- Rename: v[15],
- Link: v[16],
- ReadDir: v[17],
- ReadDirPlus: v[18],
- FsStat: v[19],
- FsInfo: v[20],
- PathConf: v[21],
- Commit: v[22],
- }, nil
-}
-
-func parseClientV4Stats(v []uint64) (ClientV4Stats, error) {
- values := int(v[0])
- if len(v[1:]) != values || values < 59 {
- return ClientV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v)
- }
-
- return ClientV4Stats{
- Null: v[1],
- Read: v[2],
- Write: v[3],
- Commit: v[4],
- Open: v[5],
- OpenConfirm: v[6],
- OpenNoattr: v[7],
- OpenDowngrade: v[8],
- Close: v[9],
- Setattr: v[10],
- FsInfo: v[11],
- Renew: v[12],
- SetClientId: v[13],
- SetClientIdConfirm: v[14],
- Lock: v[15],
- Lockt: v[16],
- Locku: v[17],
- Access: v[18],
- Getattr: v[19],
- Lookup: v[20],
- LookupRoot: v[21],
- Remove: v[22],
- Rename: v[23],
- Link: v[24],
- Symlink: v[25],
- Create: v[26],
- Pathconf: v[27],
- StatFs: v[28],
- ReadLink: v[29],
- ReadDir: v[30],
- ServerCaps: v[31],
- DelegReturn: v[32],
- GetAcl: v[33],
- SetAcl: v[34],
- FsLocations: v[35],
- ReleaseLockowner: v[36],
- Secinfo: v[37],
- FsidPresent: v[38],
- ExchangeId: v[39],
- CreateSession: v[40],
- DestroySession: v[41],
- Sequence: v[42],
- GetLeaseTime: v[43],
- ReclaimComplete: v[44],
- LayoutGet: v[45],
- GetDeviceInfo: v[46],
- LayoutCommit: v[47],
- LayoutReturn: v[48],
- SecinfoNoName: v[49],
- TestStateId: v[50],
- FreeStateId: v[51],
- GetDeviceList: v[52],
- BindConnToSession: v[53],
- DestroyClientId: v[54],
- Seek: v[55],
- Allocate: v[56],
- DeAllocate: v[57],
- LayoutStats: v[58],
- Clone: v[59],
- }, nil
-}
-
-func parseServerV4Stats(v []uint64) (ServerV4Stats, error) {
- values := int(v[0])
- if len(v[1:]) != values || values != 2 {
- return ServerV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v)
- }
-
- return ServerV4Stats{
- Null: v[1],
- Compound: v[2],
- }, nil
-}
-
-func parseV4Ops(v []uint64) (V4Ops, error) {
- values := int(v[0])
- if len(v[1:]) != values || values < 39 {
- return V4Ops{}, fmt.Errorf("invalid V4Ops line %q", v)
- }
-
- stats := V4Ops{
- Op0Unused: v[1],
- Op1Unused: v[2],
- Op2Future: v[3],
- Access: v[4],
- Close: v[5],
- Commit: v[6],
- Create: v[7],
- DelegPurge: v[8],
- DelegReturn: v[9],
- GetAttr: v[10],
- GetFH: v[11],
- Link: v[12],
- Lock: v[13],
- Lockt: v[14],
- Locku: v[15],
- Lookup: v[16],
- LookupRoot: v[17],
- Nverify: v[18],
- Open: v[19],
- OpenAttr: v[20],
- OpenConfirm: v[21],
- OpenDgrd: v[22],
- PutFH: v[23],
- PutPubFH: v[24],
- PutRootFH: v[25],
- Read: v[26],
- ReadDir: v[27],
- ReadLink: v[28],
- Remove: v[29],
- Rename: v[30],
- Renew: v[31],
- RestoreFH: v[32],
- SaveFH: v[33],
- SecInfo: v[34],
- SetAttr: v[35],
- Verify: v[36],
- Write: v[37],
- RelLockOwner: v[38],
- }
-
- return stats, nil
-}
diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go
deleted file mode 100644
index b5c0b15f3..000000000
--- a/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package nfs
-
-import (
- "bufio"
- "fmt"
- "io"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// ParseClientRPCStats returns stats read from /proc/net/rpc/nfs
-func ParseClientRPCStats(r io.Reader) (*ClientRPCStats, error) {
- stats := &ClientRPCStats{}
-
- scanner := bufio.NewScanner(r)
- for scanner.Scan() {
- line := scanner.Text()
- parts := strings.Fields(scanner.Text())
- // require at least
- if len(parts) < 2 {
- return nil, fmt.Errorf("invalid NFSd metric line %q", line)
- }
-
- values, err := util.ParseUint64s(parts[1:])
- if err != nil {
- return nil, fmt.Errorf("error parsing NFSd metric line: %s", err)
- }
-
- switch metricLine := parts[0]; metricLine {
- case "net":
- stats.Network, err = parseNetwork(values)
- case "rpc":
- stats.ClientRPC, err = parseClientRPC(values)
- case "proc2":
- stats.V2Stats, err = parseV2Stats(values)
- case "proc3":
- stats.V3Stats, err = parseV3Stats(values)
- case "proc4":
- stats.ClientV4Stats, err = parseClientV4Stats(values)
- default:
- return nil, fmt.Errorf("unknown NFSd metric line %q", metricLine)
- }
- if err != nil {
- return nil, fmt.Errorf("errors parsing NFSd metric line: %s", err)
- }
- }
-
- if err := scanner.Err(); err != nil {
- return nil, fmt.Errorf("error scanning NFSd file: %s", err)
- }
-
- return stats, nil
-}
diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go
deleted file mode 100644
index 57bb4a358..000000000
--- a/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package nfs
-
-import (
- "bufio"
- "fmt"
- "io"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// ParseServerRPCStats returns stats read from /proc/net/rpc/nfsd
-func ParseServerRPCStats(r io.Reader) (*ServerRPCStats, error) {
- stats := &ServerRPCStats{}
-
- scanner := bufio.NewScanner(r)
- for scanner.Scan() {
- line := scanner.Text()
- parts := strings.Fields(scanner.Text())
- // require at least
- if len(parts) < 2 {
- return nil, fmt.Errorf("invalid NFSd metric line %q", line)
- }
- label := parts[0]
-
- var values []uint64
- var err error
- if label == "th" {
- if len(parts) < 3 {
- return nil, fmt.Errorf("invalid NFSd th metric line %q", line)
- }
- values, err = util.ParseUint64s(parts[1:3])
- } else {
- values, err = util.ParseUint64s(parts[1:])
- }
- if err != nil {
- return nil, fmt.Errorf("error parsing NFSd metric line: %s", err)
- }
-
- switch metricLine := parts[0]; metricLine {
- case "rc":
- stats.ReplyCache, err = parseReplyCache(values)
- case "fh":
- stats.FileHandles, err = parseFileHandles(values)
- case "io":
- stats.InputOutput, err = parseInputOutput(values)
- case "th":
- stats.Threads, err = parseThreads(values)
- case "ra":
- stats.ReadAheadCache, err = parseReadAheadCache(values)
- case "net":
- stats.Network, err = parseNetwork(values)
- case "rpc":
- stats.ServerRPC, err = parseServerRPC(values)
- case "proc2":
- stats.V2Stats, err = parseV2Stats(values)
- case "proc3":
- stats.V3Stats, err = parseV3Stats(values)
- case "proc4":
- stats.ServerV4Stats, err = parseServerV4Stats(values)
- case "proc4ops":
- stats.V4Ops, err = parseV4Ops(values)
- default:
- return nil, fmt.Errorf("unknown NFSd metric line %q", metricLine)
- }
- if err != nil {
- return nil, fmt.Errorf("errors parsing NFSd metric line: %s", err)
- }
- }
-
- if err := scanner.Err(); err != nil {
- return nil, fmt.Errorf("error scanning NFSd file: %s", err)
- }
-
- return stats, nil
-}
diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go
index 8717e1fe0..330e472c7 100644
--- a/vendor/github.com/prometheus/procfs/proc.go
+++ b/vendor/github.com/prometheus/procfs/proc.go
@@ -1,11 +1,28 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package procfs
import (
+ "bytes"
"fmt"
"io/ioutil"
"os"
"strconv"
"strings"
+
+ "github.com/prometheus/procfs/internal/fs"
+ "github.com/prometheus/procfs/internal/util"
)
// Proc provides information about a running process.
@@ -13,7 +30,7 @@ type Proc struct {
// The process ID.
PID int
- fs FS
+ fs fs.FS
}
// Procs represents a list of Proc structs.
@@ -38,7 +55,7 @@ func NewProc(pid int) (Proc, error) {
if err != nil {
return Proc{}, err
}
- return fs.NewProc(pid)
+ return fs.Proc(pid)
}
// AllProcs returns a list of all currently available processes under /proc.
@@ -52,28 +69,35 @@ func AllProcs() (Procs, error) {
// Self returns a process for the current process.
func (fs FS) Self() (Proc, error) {
- p, err := os.Readlink(fs.Path("self"))
+ p, err := os.Readlink(fs.proc.Path("self"))
if err != nil {
return Proc{}, err
}
- pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1))
+ pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1))
if err != nil {
return Proc{}, err
}
- return fs.NewProc(pid)
+ return fs.Proc(pid)
}
// NewProc returns a process for the given pid.
+//
+// Deprecated: use fs.Proc() instead
func (fs FS) NewProc(pid int) (Proc, error) {
- if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil {
+ return fs.Proc(pid)
+}
+
+// Proc returns a process for the given pid.
+func (fs FS) Proc(pid int) (Proc, error) {
+ if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil {
return Proc{}, err
}
- return Proc{PID: pid, fs: fs}, nil
+ return Proc{PID: pid, fs: fs.proc}, nil
}
// AllProcs returns a list of all currently available processes.
func (fs FS) AllProcs() (Procs, error) {
- d, err := os.Open(fs.Path())
+ d, err := os.Open(fs.proc.Path())
if err != nil {
return Procs{}, err
}
@@ -90,7 +114,7 @@ func (fs FS) AllProcs() (Procs, error) {
if err != nil {
continue
}
- p = append(p, Proc{PID: int(pid), fs: fs})
+ p = append(p, Proc{PID: int(pid), fs: fs.proc})
}
return p, nil
@@ -98,13 +122,7 @@ func (fs FS) AllProcs() (Procs, error) {
// CmdLine returns the command line of a process.
func (p Proc) CmdLine() ([]string, error) {
- f, err := os.Open(p.path("cmdline"))
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- data, err := ioutil.ReadAll(f)
+ data, err := util.ReadFileNoStat(p.path("cmdline"))
if err != nil {
return nil, err
}
@@ -113,18 +131,12 @@ func (p Proc) CmdLine() ([]string, error) {
return []string{}, nil
}
- return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil
+ return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil
}
// Comm returns the command name of a process.
func (p Proc) Comm() (string, error) {
- f, err := os.Open(p.path("comm"))
- if err != nil {
- return "", err
- }
- defer f.Close()
-
- data, err := ioutil.ReadAll(f)
+ data, err := util.ReadFileNoStat(p.path("comm"))
if err != nil {
return "", err
}
@@ -142,6 +154,26 @@ func (p Proc) Executable() (string, error) {
return exe, err
}
+// Cwd returns the absolute path to the current working directory of the process.
+func (p Proc) Cwd() (string, error) {
+ wd, err := os.Readlink(p.path("cwd"))
+ if os.IsNotExist(err) {
+ return "", nil
+ }
+
+ return wd, err
+}
+
+// RootDir returns the absolute path to the process's root directory (as set by chroot)
+func (p Proc) RootDir() (string, error) {
+ rdir, err := os.Readlink(p.path("root"))
+ if os.IsNotExist(err) {
+ return "", nil
+ }
+
+ return rdir, err
+}
+
// FileDescriptors returns the currently open file descriptors of a process.
func (p Proc) FileDescriptors() ([]uintptr, error) {
names, err := p.fileDescriptors()
@@ -204,6 +236,18 @@ func (p Proc) MountStats() ([]*Mount, error) {
return parseMountStats(f)
}
+// MountInfo retrieves mount information for mount points in a
+// process's namespace.
+// It supplies information missing in `/proc/self/mounts` and
+// fixes various other problems with that file too.
+func (p Proc) MountInfo() ([]*MountInfo, error) {
+ data, err := util.ReadFileNoStat(p.path("mountinfo"))
+ if err != nil {
+ return nil, err
+ }
+ return parseMountInfo(data)
+}
+
func (p Proc) fileDescriptors() ([]string, error) {
d, err := os.Open(p.path("fd"))
if err != nil {
@@ -222,3 +266,33 @@ func (p Proc) fileDescriptors() ([]string, error) {
func (p Proc) path(pa ...string) string {
return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
}
+
+// FileDescriptorsInfo retrieves information about all file descriptors of
+// the process.
+func (p Proc) FileDescriptorsInfo() (ProcFDInfos, error) {
+ names, err := p.fileDescriptors()
+ if err != nil {
+ return nil, err
+ }
+
+ var fdinfos ProcFDInfos
+
+ for _, n := range names {
+ fdinfo, err := p.FDInfo(n)
+ if err != nil {
+ continue
+ }
+ fdinfos = append(fdinfos, *fdinfo)
+ }
+
+ return fdinfos, nil
+}
+
+// Schedstat returns task scheduling information for the process.
+func (p Proc) Schedstat() (ProcSchedstat, error) {
+ contents, err := ioutil.ReadFile(p.path("schedstat"))
+ if err != nil {
+ return ProcSchedstat{}, err
+ }
+ return parseProcSchedstat(string(contents))
+}
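The new Proc accessors introduced above can be exercised roughly as follows (error handling abbreviated; fs.Proc replaces the deprecated fs.NewProc, and the import path is assumed to be the upstream one):

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	// fs.Proc replaces the deprecated fs.NewProc.
	p, err := fs.Proc(os.Getpid())
	if err != nil {
		log.Fatal(err)
	}

	if cwd, err := p.Cwd(); err == nil {
		fmt.Println("cwd:", cwd)
	}
	if mounts, err := p.MountInfo(); err == nil {
		fmt.Println("mount points in namespace:", len(mounts))
	}
	if sched, err := p.Schedstat(); err == nil {
		fmt.Printf("schedstat: %+v\n", sched)
	}
}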
diff --git a/vendor/github.com/prometheus/procfs/proc_environ.go b/vendor/github.com/prometheus/procfs/proc_environ.go
new file mode 100644
index 000000000..6134b3580
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_environ.go
@@ -0,0 +1,37 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// Environ reads the process environment from /proc/<pid>/environ.
+func (p Proc) Environ() ([]string, error) {
+ environments := make([]string, 0)
+
+ data, err := util.ReadFileNoStat(p.path("environ"))
+ if err != nil {
+ return environments, err
+ }
+
+ environments = strings.Split(string(data), "\000")
+ if len(environments) > 0 {
+ environments = environments[:len(environments)-1]
+ }
+
+ return environments, nil
+}
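A brief sketch of reading the environment of the current process via the new helper:

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	p, err := fs.Self()
	if err != nil {
		log.Fatal(err)
	}
	// Each entry is a "KEY=value" string; the trailing NUL is already stripped.
	env, err := p.Environ()
	if err != nil {
		log.Fatal(err)
	}
	for _, kv := range env {
		if strings.HasPrefix(kv, "PATH=") {
			fmt.Println(kv)
		}
	}
}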
diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go
new file mode 100644
index 000000000..4e7597f86
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go
@@ -0,0 +1,125 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "regexp"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// Regexp variables
+var (
+ rPos = regexp.MustCompile(`^pos:\s+(\d+)$`)
+ rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`)
+ rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`)
+ rInotify = regexp.MustCompile(`^inotify`)
+)
+
+// ProcFDInfo represents file descriptor information.
+type ProcFDInfo struct {
+ // File descriptor
+ FD string
+ // File offset
+ Pos string
+ // File access mode and status flags
+ Flags string
+ // Mount point ID
+ MntID string
+ // List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only)
+ InotifyInfos []InotifyInfo
+}
+
+// FDInfo constructor. On kernels older than 3.8, InotifyInfos will always be empty.
+func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) {
+ data, err := util.ReadFileNoStat(p.path("fdinfo", fd))
+ if err != nil {
+ return nil, err
+ }
+
+ var text, pos, flags, mntid string
+ var inotify []InotifyInfo
+
+ scanner := bufio.NewScanner(bytes.NewReader(data))
+ for scanner.Scan() {
+ text = scanner.Text()
+ if rPos.MatchString(text) {
+ pos = rPos.FindStringSubmatch(text)[1]
+ } else if rFlags.MatchString(text) {
+ flags = rFlags.FindStringSubmatch(text)[1]
+ } else if rMntID.MatchString(text) {
+ mntid = rMntID.FindStringSubmatch(text)[1]
+ } else if rInotify.MatchString(text) {
+ newInotify, err := parseInotifyInfo(text)
+ if err != nil {
+ return nil, err
+ }
+ inotify = append(inotify, *newInotify)
+ }
+ }
+
+ i := &ProcFDInfo{
+ FD: fd,
+ Pos: pos,
+ Flags: flags,
+ MntID: mntid,
+ InotifyInfos: inotify,
+ }
+
+ return i, nil
+}
+
+// InotifyInfo represents a single inotify line in the fdinfo file.
+type InotifyInfo struct {
+ // Watch descriptor number
+ WD string
+ // Inode number
+ Ino string
+ // Device ID
+ Sdev string
+ // Mask of events being monitored
+ Mask string
+}
+
+// InotifyInfo constructor. Only available on kernel 3.8+.
+func parseInotifyInfo(line string) (*InotifyInfo, error) {
+ r := regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)\s+mask:([0-9a-f]+)`)
+ m := r.FindStringSubmatch(line)
+ i := &InotifyInfo{
+ WD: m[1],
+ Ino: m[2],
+ Sdev: m[3],
+ Mask: m[4],
+ }
+ return i, nil
+}
+
+// ProcFDInfos represents a list of ProcFDInfo structs.
+type ProcFDInfos []ProcFDInfo
+
+func (p ProcFDInfos) Len() int { return len(p) }
+func (p ProcFDInfos) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+func (p ProcFDInfos) Less(i, j int) bool { return p[i].FD < p[j].FD }
+
+// InotifyWatchLen returns the total number of inotify watches
+func (p ProcFDInfos) InotifyWatchLen() (int, error) {
+ length := 0
+ for _, f := range p {
+ length += len(f.InotifyInfos)
+ }
+
+ return length, nil
+}
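Combined with Proc.FileDescriptorsInfo from proc.go above, the fdinfo parser can be used along these lines:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	p, err := fs.Self()
	if err != nil {
		log.Fatal(err)
	}
	// FileDescriptorsInfo collects one ProcFDInfo per open file descriptor.
	infos, err := p.FileDescriptorsInfo()
	if err != nil {
		log.Fatal(err)
	}
	watches, err := infos.InotifyWatchLen()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("open fds: %d, inotify watches: %d\n", len(infos), watches)
}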
diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go
index e7f6674d0..776f34971 100644
--- a/vendor/github.com/prometheus/procfs/proc_io.go
+++ b/vendor/github.com/prometheus/procfs/proc_io.go
@@ -1,9 +1,22 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package procfs
import (
"fmt"
- "io/ioutil"
- "os"
+
+ "github.com/prometheus/procfs/internal/util"
)
// ProcIO models the content of /proc/<pid>/io.
@@ -26,17 +39,11 @@ type ProcIO struct {
CancelledWriteBytes int64
}
-// NewIO creates a new ProcIO instance from a given Proc instance.
-func (p Proc) NewIO() (ProcIO, error) {
+// IO creates a new ProcIO instance from a given Proc instance.
+func (p Proc) IO() (ProcIO, error) {
pio := ProcIO{}
- f, err := os.Open(p.path("io"))
- if err != nil {
- return pio, err
- }
- defer f.Close()
-
- data, err := ioutil.ReadAll(f)
+ data, err := util.ReadFileNoStat(p.path("io"))
if err != nil {
return pio, err
}
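The renamed IO accessor works the same way as the removed NewIO; a minimal sketch:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	p, err := fs.Self()
	if err != nil {
		log.Fatal(err)
	}
	pio, err := p.IO() // replaces the removed p.NewIO()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", pio)
}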
diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go
index b684a5b55..91ee24df8 100644
--- a/vendor/github.com/prometheus/procfs/proc_limits.go
+++ b/vendor/github.com/prometheus/procfs/proc_limits.go
@@ -1,3 +1,16 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package procfs
import (
@@ -65,7 +78,14 @@ var (
)
// NewLimits returns the current soft limits of the process.
+//
+// Deprecated: use p.Limits() instead
func (p Proc) NewLimits() (ProcLimits, error) {
+ return p.Limits()
+}
+
+// Limits returns the current soft limits of the process.
+func (p Proc) Limits() (ProcLimits, error) {
f, err := os.Open(p.path("limits"))
if err != nil {
return ProcLimits{}, err
diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go
index befdd2690..c66740ff7 100644
--- a/vendor/github.com/prometheus/procfs/proc_ns.go
+++ b/vendor/github.com/prometheus/procfs/proc_ns.go
@@ -1,3 +1,16 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package procfs
import (
@@ -16,9 +29,9 @@ type Namespace struct {
// Namespaces contains all of the namespaces that the process is contained in.
type Namespaces map[string]Namespace
-// NewNamespaces reads from /proc/[pid/ns/* to get the namespaces of which the
+// Namespaces reads from /proc/<pid>/ns/* to get the namespaces of which the
// process is a member.
-func (p Proc) NewNamespaces() (Namespaces, error) {
+func (p Proc) Namespaces() (Namespaces, error) {
d, err := os.Open(p.path("ns"))
if err != nil {
return nil, err
diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go
new file mode 100644
index 000000000..0d7bee54c
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_psi.go
@@ -0,0 +1,100 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+// The PSI / pressure interface is described at
+// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/accounting/psi.txt
+// Each resource (cpu, io, memory, ...) is exposed as a single file.
+// Each file may contain up to two lines, one for "some" pressure and one for "full" pressure.
+// Each line contains several averages (over n seconds) and a total in µs.
+//
+// Example io pressure file:
+// > some avg10=0.06 avg60=0.21 avg300=0.99 total=8537362
+// > full avg10=0.00 avg60=0.13 avg300=0.96 total=8183134
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d"
+
+// PSILine is a single line of values as returned by /proc/pressure/*
+// The Avg entries are averages over n seconds, as a percentage
+// The Total line is in microseconds
+type PSILine struct {
+ Avg10 float64
+ Avg60 float64
+ Avg300 float64
+ Total uint64
+}
+
+// PSIStats represent pressure stall information from /proc/pressure/*
+// Some indicates the share of time in which at least some tasks are stalled
+// Full indicates the share of time in which all non-idle tasks are stalled simultaneously
+type PSIStats struct {
+ Some *PSILine
+ Full *PSILine
+}
+
+// PSIStatsForResource reads pressure stall information for the specified
+// resource from /proc/pressure/<resource>. At time of writing this can be
+// either "cpu", "memory" or "io".
+func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) {
+ data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
+ if err != nil {
+ return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %s", resource)
+ }
+
+ return parsePSIStats(resource, bytes.NewReader(data))
+}
+
+// parsePSIStats parses the specified file for pressure stall information
+func parsePSIStats(resource string, r io.Reader) (PSIStats, error) {
+ psiStats := PSIStats{}
+
+ scanner := bufio.NewScanner(r)
+ for scanner.Scan() {
+ l := scanner.Text()
+ prefix := strings.Split(l, " ")[0]
+ switch prefix {
+ case "some":
+ psi := PSILine{}
+ _, err := fmt.Sscanf(l, fmt.Sprintf("some %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total)
+ if err != nil {
+ return PSIStats{}, err
+ }
+ psiStats.Some = &psi
+ case "full":
+ psi := PSILine{}
+ _, err := fmt.Sscanf(l, fmt.Sprintf("full %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total)
+ if err != nil {
+ return PSIStats{}, err
+ }
+ psiStats.Full = &psi
+ default:
+ // If we encounter a line with an unknown prefix, ignore it and move on
+ // Should new measurement types be added in the future we'll simply ignore them instead
+ // of erroring on retrieval
+ continue
+ }
+ }
+
+ return psiStats, nil
+}
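A hedged sketch of querying pressure stall information for each resource; kernels built without PSI support simply return an error here:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	for _, res := range []string{"cpu", "memory", "io"} {
		stats, err := fs.PSIStatsForResource(res)
		if err != nil {
			// Typical on kernels built without PSI support.
			log.Printf("psi %s: %v", res, err)
			continue
		}
		if stats.Some != nil {
			fmt.Printf("%s some: avg10=%.2f%% total=%dus\n", res, stats.Some.Avg10, stats.Some.Total)
		}
	}
}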
diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go
index 724e271b9..4517d2e9d 100644
--- a/vendor/github.com/prometheus/procfs/proc_stat.go
+++ b/vendor/github.com/prometheus/procfs/proc_stat.go
@@ -1,10 +1,25 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package procfs
import (
"bytes"
"fmt"
- "io/ioutil"
"os"
+
+ "github.com/prometheus/procfs/internal/fs"
+ "github.com/prometheus/procfs/internal/util"
)
// Originally, this USER_HZ value was dynamically retrieved via a sysconf call
@@ -82,22 +97,23 @@ type ProcStat struct {
// in clock ticks.
Starttime uint64
// Virtual memory size in bytes.
- VSize int
+ VSize uint
// Resident set size in pages.
RSS int
- fs FS
+ proc fs.FS
}
// NewStat returns the current status information of the process.
+//
+// Deprecated: use p.Stat() instead
func (p Proc) NewStat() (ProcStat, error) {
- f, err := os.Open(p.path("stat"))
- if err != nil {
- return ProcStat{}, err
- }
- defer f.Close()
+ return p.Stat()
+}
- data, err := ioutil.ReadAll(f)
+// Stat returns the current status information of the process.
+func (p Proc) Stat() (ProcStat, error) {
+ data, err := util.ReadFileNoStat(p.path("stat"))
if err != nil {
return ProcStat{}, err
}
@@ -105,7 +121,7 @@ func (p Proc) NewStat() (ProcStat, error) {
var (
ignore int
- s = ProcStat{PID: p.PID, fs: p.fs}
+ s = ProcStat{PID: p.PID, proc: p.fs}
l = bytes.Index(data, []byte("("))
r = bytes.LastIndex(data, []byte(")"))
)
@@ -151,7 +167,7 @@ func (p Proc) NewStat() (ProcStat, error) {
}
// VirtualMemory returns the virtual memory size in bytes.
-func (s ProcStat) VirtualMemory() int {
+func (s ProcStat) VirtualMemory() uint {
return s.VSize
}
@@ -162,7 +178,8 @@ func (s ProcStat) ResidentMemory() int {
// StartTime returns the unix timestamp of the process in seconds.
func (s ProcStat) StartTime() (float64, error) {
- stat, err := s.fs.NewStat()
+ fs := FS{proc: s.proc}
+ stat, err := fs.Stat()
if err != nil {
return 0, err
}
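The renamed Stat accessor and the uint-typed VirtualMemory can be exercised like this (a sketch only):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	p, err := fs.Self()
	if err != nil {
		log.Fatal(err)
	}
	stat, err := p.Stat() // NewStat is kept only as a deprecated alias
	if err != nil {
		log.Fatal(err)
	}
	start, err := stat.StartTime()
	if err != nil {
		log.Fatal(err)
	}
	// VirtualMemory now returns uint rather than int.
	fmt.Printf("vsize=%d bytes, started at unix time %.0f\n", stat.VirtualMemory(), start)
}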
diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go
new file mode 100644
index 000000000..e30c2b88f
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_status.go
@@ -0,0 +1,161 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bytes"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// ProcStatus provides status information about the process,
+// read from /proc/[pid]/status.
+type ProcStatus struct {
+ // The process ID.
+ PID int
+ // The process name.
+ Name string
+
+ // Thread group ID.
+ TGID int
+
+ // Peak virtual memory size.
+ VmPeak uint64
+ // Virtual memory size.
+ VmSize uint64
+ // Locked memory size.
+ VmLck uint64
+ // Pinned memory size.
+ VmPin uint64
+ // Peak resident set size.
+ VmHWM uint64
+ // Resident set size (sum of RssAnon, RssFile and RssShmem).
+ VmRSS uint64
+ // Size of resident anonymous memory.
+ RssAnon uint64
+ // Size of resident file mappings.
+ RssFile uint64
+ // Size of resident shared memory.
+ RssShmem uint64
+ // Size of data segments.
+ VmData uint64
+ // Size of stack segments.
+ VmStk uint64
+ // Size of text segments.
+ VmExe uint64
+ // Shared library code size.
+ VmLib uint64
+ // Page table entries size.
+ VmPTE uint64
+ // Size of second-level page tables.
+ VmPMD uint64
+ // Swapped-out virtual memory size by anonymous private.
+ VmSwap uint64
+ // Size of hugetlb memory portions.
+ HugetlbPages uint64
+
+ // Number of voluntary context switches.
+ VoluntaryCtxtSwitches uint64
+ // Number of involuntary context switches.
+ NonVoluntaryCtxtSwitches uint64
+}
+
+// NewStatus returns the current status information of the process.
+func (p Proc) NewStatus() (ProcStatus, error) {
+ data, err := util.ReadFileNoStat(p.path("status"))
+ if err != nil {
+ return ProcStatus{}, err
+ }
+
+ s := ProcStatus{PID: p.PID}
+
+ lines := strings.Split(string(data), "\n")
+ for _, line := range lines {
+ if !bytes.Contains([]byte(line), []byte(":")) {
+ continue
+ }
+
+ kv := strings.SplitN(line, ":", 2)
+
+ // removes spaces
+ k := string(strings.TrimSpace(kv[0]))
+ v := string(strings.TrimSpace(kv[1]))
+ // removes "kB"
+ v = string(bytes.Trim([]byte(v), " kB"))
+
+ // parse the value as an integer when possible
+ // the error check can be skipped because vKBytes is not used when the value is a string
+ vKBytes, _ := strconv.ParseUint(v, 10, 64)
+ // convert kB to B
+ vBytes := vKBytes * 1024
+
+ s.fillStatus(k, v, vKBytes, vBytes)
+ }
+
+ return s, nil
+}
+
+func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) {
+ switch k {
+ case "Tgid":
+ s.TGID = int(vUint)
+ case "Name":
+ s.Name = vString
+ case "VmPeak":
+ s.VmPeak = vUintBytes
+ case "VmSize":
+ s.VmSize = vUintBytes
+ case "VmLck":
+ s.VmLck = vUintBytes
+ case "VmPin":
+ s.VmPin = vUintBytes
+ case "VmHWM":
+ s.VmHWM = vUintBytes
+ case "VmRSS":
+ s.VmRSS = vUintBytes
+ case "RssAnon":
+ s.RssAnon = vUintBytes
+ case "RssFile":
+ s.RssFile = vUintBytes
+ case "RssShmem":
+ s.RssShmem = vUintBytes
+ case "VmData":
+ s.VmData = vUintBytes
+ case "VmStk":
+ s.VmStk = vUintBytes
+ case "VmExe":
+ s.VmExe = vUintBytes
+ case "VmLib":
+ s.VmLib = vUintBytes
+ case "VmPTE":
+ s.VmPTE = vUintBytes
+ case "VmPMD":
+ s.VmPMD = vUintBytes
+ case "VmSwap":
+ s.VmSwap = vUintBytes
+ case "HugetlbPages":
+ s.HugetlbPages = vUintBytes
+ case "voluntary_ctxt_switches":
+ s.VoluntaryCtxtSwitches = vUint
+ case "nonvoluntary_ctxt_switches":
+ s.NonVoluntaryCtxtSwitches = vUint
+ }
+}
+
+// TotalCtxtSwitches returns the total number of context switches.
+func (s ProcStatus) TotalCtxtSwitches() uint64 {
+ return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches
+}
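A short sketch of consuming the new ProcStatus parser; FS.Proc is assumed from the rest of the upstream package:

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	p, err := fs.Proc(os.Getpid())
	if err != nil {
		log.Fatal(err)
	}
	status, err := p.NewStatus()
	if err != nil {
		log.Fatal(err)
	}
	// Memory fields have already been converted from kB to bytes by fillStatus.
	fmt.Printf("%s: VmRSS=%d bytes, ctxt switches=%d\n",
		status.Name, status.VmRSS, status.TotalCtxtSwitches())
}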
diff --git a/vendor/github.com/prometheus/procfs/schedstat.go b/vendor/github.com/prometheus/procfs/schedstat.go
new file mode 100644
index 000000000..a4c4089ac
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/schedstat.go
@@ -0,0 +1,118 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "errors"
+ "os"
+ "regexp"
+ "strconv"
+)
+
+var (
+ cpuLineRE = regexp.MustCompile(`cpu(\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+)`)
+ procLineRE = regexp.MustCompile(`(\d+) (\d+) (\d+)`)
+)
+
+// Schedstat contains scheduler statistics from /proc/schedstat
+//
+// See
+// https://www.kernel.org/doc/Documentation/scheduler/sched-stats.txt
+// for a detailed description of what these numbers mean.
+//
+// Note the current kernel documentation claims some of the time units are in
+// jiffies when they are actually in nanoseconds since 2.6.23 with the
+// introduction of CFS. A fix to the documentation is pending. See
+// https://lore.kernel.org/patchwork/project/lkml/list/?series=403473
+type Schedstat struct {
+ CPUs []*SchedstatCPU
+}
+
+// SchedstatCPU contains the values from one "cpu" line
+type SchedstatCPU struct {
+ CPUNum string
+
+ RunningNanoseconds uint64
+ WaitingNanoseconds uint64
+ RunTimeslices uint64
+}
+
+// ProcSchedstat contains the values from /proc/&lt;pid&gt;/schedstat
+type ProcSchedstat struct {
+ RunningNanoseconds uint64
+ WaitingNanoseconds uint64
+ RunTimeslices uint64
+}
+
+// Schedstat reads data from /proc/schedstat
+func (fs FS) Schedstat() (*Schedstat, error) {
+ file, err := os.Open(fs.proc.Path("schedstat"))
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ stats := &Schedstat{}
+ scanner := bufio.NewScanner(file)
+
+ for scanner.Scan() {
+ match := cpuLineRE.FindStringSubmatch(scanner.Text())
+ if match != nil {
+ cpu := &SchedstatCPU{}
+ cpu.CPUNum = match[1]
+
+ cpu.RunningNanoseconds, err = strconv.ParseUint(match[8], 10, 64)
+ if err != nil {
+ continue
+ }
+
+ cpu.WaitingNanoseconds, err = strconv.ParseUint(match[9], 10, 64)
+ if err != nil {
+ continue
+ }
+
+ cpu.RunTimeslices, err = strconv.ParseUint(match[10], 10, 64)
+ if err != nil {
+ continue
+ }
+
+ stats.CPUs = append(stats.CPUs, cpu)
+ }
+ }
+
+ return stats, nil
+}
+
+func parseProcSchedstat(contents string) (stats ProcSchedstat, err error) {
+ match := procLineRE.FindStringSubmatch(contents)
+
+ if match != nil {
+ stats.RunningNanoseconds, err = strconv.ParseUint(match[1], 10, 64)
+ if err != nil {
+ return
+ }
+
+ stats.WaitingNanoseconds, err = strconv.ParseUint(match[2], 10, 64)
+ if err != nil {
+ return
+ }
+
+ stats.RunTimeslices, err = strconv.ParseUint(match[3], 10, 64)
+ return
+ }
+
+ err = errors.New("could not parse schedstat")
+ return
+}
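A sketch of reading the per-CPU scheduler statistics added above:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	sched, err := fs.Schedstat()
	if err != nil {
		log.Fatal(err)
	}
	for _, cpu := range sched.CPUs {
		// Running/waiting times are nanoseconds on kernels >= 2.6.23.
		fmt.Printf("cpu%s running=%dns waiting=%dns timeslices=%d\n",
			cpu.CPUNum, cpu.RunningNanoseconds, cpu.WaitingNanoseconds, cpu.RunTimeslices)
	}
}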
diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go
index 701f4df64..b2a6fc994 100644
--- a/vendor/github.com/prometheus/procfs/stat.go
+++ b/vendor/github.com/prometheus/procfs/stat.go
@@ -1,12 +1,28 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package procfs
import (
"bufio"
+ "bytes"
"fmt"
"io"
- "os"
"strconv"
"strings"
+
+ "github.com/prometheus/procfs/internal/fs"
+ "github.com/prometheus/procfs/internal/util"
)
// CPUStat shows how much time the cpu spend in various stages.
@@ -65,16 +81,6 @@ type Stat struct {
SoftIRQ SoftIRQStat
}
-// NewStat returns kernel/system statistics read from /proc/stat.
-func NewStat() (Stat, error) {
- fs, err := NewFS(DefaultMountPoint)
- if err != nil {
- return Stat{}, err
- }
-
- return fs.NewStat()
-}
-
// Parse a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum).
func parseCPUStat(line string) (CPUStat, int64, error) {
cpuStat := CPUStat{}
@@ -136,19 +142,38 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
return softIRQStat, total, nil
}
-// NewStat returns an information about current kernel/system statistics.
+// NewStat returns information about current cpu/process statistics.
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+//
+// Deprecated: use fs.Stat() instead
+func NewStat() (Stat, error) {
+ fs, err := NewFS(fs.DefaultProcMountPoint)
+ if err != nil {
+ return Stat{}, err
+ }
+ return fs.Stat()
+}
+
+// NewStat returns information about current cpu/process statistics.
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+//
+// Deprecated: use fs.Stat() instead
func (fs FS) NewStat() (Stat, error) {
- // See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+ return fs.Stat()
+}
- f, err := os.Open(fs.Path("stat"))
+// Stat returns information about current cpu/process statistics.
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+func (fs FS) Stat() (Stat, error) {
+ fileName := fs.proc.Path("stat")
+ data, err := util.ReadFileNoStat(fileName)
if err != nil {
return Stat{}, err
}
- defer f.Close()
stat := Stat{}
- scanner := bufio.NewScanner(f)
+ scanner := bufio.NewScanner(bytes.NewReader(data))
for scanner.Scan() {
line := scanner.Text()
parts := strings.Fields(scanner.Text())
@@ -212,7 +237,7 @@ func (fs FS) NewStat() (Stat, error) {
}
if err := scanner.Err(); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err)
+ return Stat{}, fmt.Errorf("couldn't parse %s: %s", fileName, err)
}
return stat, nil
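A migration sketch from the now-deprecated package-level NewStat to FS.Stat:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// Before: stat, err := procfs.NewStat()   // still works, but deprecated
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	stat, err := fs.Stat()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("softirq totals: %+v\n", stat.SoftIRQ)
}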
diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go
new file mode 100644
index 000000000..cb1389141
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/vm.go
@@ -0,0 +1,210 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows
+
+package procfs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// The VM interface is described at
+// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
+// Each setting is exposed as a single file.
+// Each file contains one line with a single numerical value, except lowmem_reserve_ratio, which holds an array,
+// and numa_zonelist_order (deprecated), which is a string.
+type VM struct {
+ AdminReserveKbytes *int64 // /proc/sys/vm/admin_reserve_kbytes
+ BlockDump *int64 // /proc/sys/vm/block_dump
+ CompactUnevictableAllowed *int64 // /proc/sys/vm/compact_unevictable_allowed
+ DirtyBackgroundBytes *int64 // /proc/sys/vm/dirty_background_bytes
+ DirtyBackgroundRatio *int64 // /proc/sys/vm/dirty_background_ratio
+ DirtyBytes *int64 // /proc/sys/vm/dirty_bytes
+ DirtyExpireCentisecs *int64 // /proc/sys/vm/dirty_expire_centisecs
+ DirtyRatio *int64 // /proc/sys/vm/dirty_ratio
+ DirtytimeExpireSeconds *int64 // /proc/sys/vm/dirtytime_expire_seconds
+ DirtyWritebackCentisecs *int64 // /proc/sys/vm/dirty_writeback_centisecs
+ DropCaches *int64 // /proc/sys/vm/drop_caches
+ ExtfragThreshold *int64 // /proc/sys/vm/extfrag_threshold
+ HugetlbShmGroup *int64 // /proc/sys/vm/hugetlb_shm_group
+ LaptopMode *int64 // /proc/sys/vm/laptop_mode
+ LegacyVaLayout *int64 // /proc/sys/vm/legacy_va_layout
+ LowmemReserveRatio []*int64 // /proc/sys/vm/lowmem_reserve_ratio
+ MaxMapCount *int64 // /proc/sys/vm/max_map_count
+ MemoryFailureEarlyKill *int64 // /proc/sys/vm/memory_failure_early_kill
+ MemoryFailureRecovery *int64 // /proc/sys/vm/memory_failure_recovery
+ MinFreeKbytes *int64 // /proc/sys/vm/min_free_kbytes
+ MinSlabRatio *int64 // /proc/sys/vm/min_slab_ratio
+ MinUnmappedRatio *int64 // /proc/sys/vm/min_unmapped_ratio
+ MmapMinAddr *int64 // /proc/sys/vm/mmap_min_addr
+ NrHugepages *int64 // /proc/sys/vm/nr_hugepages
+ NrHugepagesMempolicy *int64 // /proc/sys/vm/nr_hugepages_mempolicy
+ NrOvercommitHugepages *int64 // /proc/sys/vm/nr_overcommit_hugepages
+ NumaStat *int64 // /proc/sys/vm/numa_stat
+ NumaZonelistOrder string // /proc/sys/vm/numa_zonelist_order
+ OomDumpTasks *int64 // /proc/sys/vm/oom_dump_tasks
+ OomKillAllocatingTask *int64 // /proc/sys/vm/oom_kill_allocating_task
+ OvercommitKbytes *int64 // /proc/sys/vm/overcommit_kbytes
+ OvercommitMemory *int64 // /proc/sys/vm/overcommit_memory
+ OvercommitRatio *int64 // /proc/sys/vm/overcommit_ratio
+ PageCluster *int64 // /proc/sys/vm/page-cluster
+ PanicOnOom *int64 // /proc/sys/vm/panic_on_oom
+ PercpuPagelistFraction *int64 // /proc/sys/vm/percpu_pagelist_fraction
+ StatInterval *int64 // /proc/sys/vm/stat_interval
+ Swappiness *int64 // /proc/sys/vm/swappiness
+ UserReserveKbytes *int64 // /proc/sys/vm/user_reserve_kbytes
+ VfsCachePressure *int64 // /proc/sys/vm/vfs_cache_pressure
+ WatermarkBoostFactor *int64 // /proc/sys/vm/watermark_boost_factor
+ WatermarkScaleFactor *int64 // /proc/sys/vm/watermark_scale_factor
+ ZoneReclaimMode *int64 // /proc/sys/vm/zone_reclaim_mode
+}
+
+// VM reads the VM statistics from the specified `proc` filesystem.
+func (fs FS) VM() (*VM, error) {
+ path := fs.proc.Path("sys/vm")
+ file, err := os.Stat(path)
+ if err != nil {
+ return nil, err
+ }
+ if !file.Mode().IsDir() {
+ return nil, fmt.Errorf("%s is not a directory", path)
+ }
+
+ files, err := ioutil.ReadDir(path)
+ if err != nil {
+ return nil, err
+ }
+
+ var vm VM
+ for _, f := range files {
+ if f.IsDir() {
+ continue
+ }
+
+ name := filepath.Join(path, f.Name())
+ // ignore errors on read, as some files
+ // in /proc/sys/vm are write-only
+ value, err := util.SysReadFile(name)
+ if err != nil {
+ continue
+ }
+ vp := util.NewValueParser(value)
+
+ switch f.Name() {
+ case "admin_reserve_kbytes":
+ vm.AdminReserveKbytes = vp.PInt64()
+ case "block_dump":
+ vm.BlockDump = vp.PInt64()
+ case "compact_unevictable_allowed":
+ vm.CompactUnevictableAllowed = vp.PInt64()
+ case "dirty_background_bytes":
+ vm.DirtyBackgroundBytes = vp.PInt64()
+ case "dirty_background_ratio":
+ vm.DirtyBackgroundRatio = vp.PInt64()
+ case "dirty_bytes":
+ vm.DirtyBytes = vp.PInt64()
+ case "dirty_expire_centisecs":
+ vm.DirtyExpireCentisecs = vp.PInt64()
+ case "dirty_ratio":
+ vm.DirtyRatio = vp.PInt64()
+ case "dirtytime_expire_seconds":
+ vm.DirtytimeExpireSeconds = vp.PInt64()
+ case "dirty_writeback_centisecs":
+ vm.DirtyWritebackCentisecs = vp.PInt64()
+ case "drop_caches":
+ vm.DropCaches = vp.PInt64()
+ case "extfrag_threshold":
+ vm.ExtfragThreshold = vp.PInt64()
+ case "hugetlb_shm_group":
+ vm.HugetlbShmGroup = vp.PInt64()
+ case "laptop_mode":
+ vm.LaptopMode = vp.PInt64()
+ case "legacy_va_layout":
+ vm.LegacyVaLayout = vp.PInt64()
+ case "lowmem_reserve_ratio":
+ stringSlice := strings.Fields(value)
+ pint64Slice := make([]*int64, 0, len(stringSlice))
+ for _, value := range stringSlice {
+ vp := util.NewValueParser(value)
+ pint64Slice = append(pint64Slice, vp.PInt64())
+ }
+ vm.LowmemReserveRatio = pint64Slice
+ case "max_map_count":
+ vm.MaxMapCount = vp.PInt64()
+ case "memory_failure_early_kill":
+ vm.MemoryFailureEarlyKill = vp.PInt64()
+ case "memory_failure_recovery":
+ vm.MemoryFailureRecovery = vp.PInt64()
+ case "min_free_kbytes":
+ vm.MinFreeKbytes = vp.PInt64()
+ case "min_slab_ratio":
+ vm.MinSlabRatio = vp.PInt64()
+ case "min_unmapped_ratio":
+ vm.MinUnmappedRatio = vp.PInt64()
+ case "mmap_min_addr":
+ vm.MmapMinAddr = vp.PInt64()
+ case "nr_hugepages":
+ vm.NrHugepages = vp.PInt64()
+ case "nr_hugepages_mempolicy":
+ vm.NrHugepagesMempolicy = vp.PInt64()
+ case "nr_overcommit_hugepages":
+ vm.NrOvercommitHugepages = vp.PInt64()
+ case "numa_stat":
+ vm.NumaStat = vp.PInt64()
+ case "numa_zonelist_order":
+ vm.NumaZonelistOrder = value
+ case "oom_dump_tasks":
+ vm.OomDumpTasks = vp.PInt64()
+ case "oom_kill_allocating_task":
+ vm.OomKillAllocatingTask = vp.PInt64()
+ case "overcommit_kbytes":
+ vm.OvercommitKbytes = vp.PInt64()
+ case "overcommit_memory":
+ vm.OvercommitMemory = vp.PInt64()
+ case "overcommit_ratio":
+ vm.OvercommitRatio = vp.PInt64()
+ case "page-cluster":
+ vm.PageCluster = vp.PInt64()
+ case "panic_on_oom":
+ vm.PanicOnOom = vp.PInt64()
+ case "percpu_pagelist_fraction":
+ vm.PercpuPagelistFraction = vp.PInt64()
+ case "stat_interval":
+ vm.StatInterval = vp.PInt64()
+ case "swappiness":
+ vm.Swappiness = vp.PInt64()
+ case "user_reserve_kbytes":
+ vm.UserReserveKbytes = vp.PInt64()
+ case "vfs_cache_pressure":
+ vm.VfsCachePressure = vp.PInt64()
+ case "watermark_boost_factor":
+ vm.WatermarkBoostFactor = vp.PInt64()
+ case "watermark_scale_factor":
+ vm.WatermarkScaleFactor = vp.PInt64()
+ case "zone_reclaim_mode":
+ vm.ZoneReclaimMode = vp.PInt64()
+ }
+ if err := vp.Err(); err != nil {
+ return nil, err
+ }
+ }
+
+ return &vm, nil
+}
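A sketch of the new FS.VM accessor; note that the pointer fields stay nil for write-only or absent sysctls:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	vm, err := fs.VM()
	if err != nil {
		log.Fatal(err)
	}
	if vm.Swappiness != nil {
		fmt.Println("vm.swappiness =", *vm.Swappiness)
	}
	// lowmem_reserve_ratio is the one array-valued setting.
	for i, v := range vm.LowmemReserveRatio {
		if v != nil {
			fmt.Printf("lowmem_reserve_ratio[%d] = %d\n", i, *v)
		}
	}
}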
diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/xfrm.go
index ffe9df50d..30aa417d5 100644
--- a/vendor/github.com/prometheus/procfs/xfrm.go
+++ b/vendor/github.com/prometheus/procfs/xfrm.go
@@ -97,7 +97,7 @@ func NewXfrmStat() (XfrmStat, error) {
// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem.
func (fs FS) NewXfrmStat() (XfrmStat, error) {
- file, err := os.Open(fs.Path("net/xfrm_stat"))
+ file, err := os.Open(fs.proc.Path("net/xfrm_stat"))
if err != nil {
return XfrmStat{}, err
}
@@ -113,7 +113,7 @@ func (fs FS) NewXfrmStat() (XfrmStat, error) {
if len(fields) != 2 {
return XfrmStat{}, fmt.Errorf(
- "couldnt parse %s line %s", file.Name(), s.Text())
+ "couldn't parse %s line %s", file.Name(), s.Text())
}
name := fields[0]
diff --git a/vendor/github.com/prometheus/procfs/xfs/parse.go b/vendor/github.com/prometheus/procfs/xfs/parse.go
deleted file mode 100644
index 2bc0ef342..000000000
--- a/vendor/github.com/prometheus/procfs/xfs/parse.go
+++ /dev/null
@@ -1,330 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package xfs
-
-import (
- "bufio"
- "fmt"
- "io"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// ParseStats parses a Stats from an input io.Reader, using the format
-// found in /proc/fs/xfs/stat.
-func ParseStats(r io.Reader) (*Stats, error) {
- const (
- // Fields parsed into stats structures.
- fieldExtentAlloc = "extent_alloc"
- fieldAbt = "abt"
- fieldBlkMap = "blk_map"
- fieldBmbt = "bmbt"
- fieldDir = "dir"
- fieldTrans = "trans"
- fieldIg = "ig"
- fieldLog = "log"
- fieldRw = "rw"
- fieldAttr = "attr"
- fieldIcluster = "icluster"
- fieldVnodes = "vnodes"
- fieldBuf = "buf"
- fieldXpc = "xpc"
-
- // Unimplemented at this time due to lack of documentation.
- fieldPushAil = "push_ail"
- fieldXstrat = "xstrat"
- fieldAbtb2 = "abtb2"
- fieldAbtc2 = "abtc2"
- fieldBmbt2 = "bmbt2"
- fieldIbt2 = "ibt2"
- fieldFibt2 = "fibt2"
- fieldQm = "qm"
- fieldDebug = "debug"
- )
-
- var xfss Stats
-
- s := bufio.NewScanner(r)
- for s.Scan() {
- // Expect at least a string label and a single integer value, ex:
- // - abt 0
- // - rw 1 2
- ss := strings.Fields(string(s.Bytes()))
- if len(ss) < 2 {
- continue
- }
- label := ss[0]
-
- // Extended precision counters are uint64 values.
- if label == fieldXpc {
- us, err := util.ParseUint64s(ss[1:])
- if err != nil {
- return nil, err
- }
-
- xfss.ExtendedPrecision, err = extendedPrecisionStats(us)
- if err != nil {
- return nil, err
- }
-
- continue
- }
-
- // All other counters are uint32 values.
- us, err := util.ParseUint32s(ss[1:])
- if err != nil {
- return nil, err
- }
-
- switch label {
- case fieldExtentAlloc:
- xfss.ExtentAllocation, err = extentAllocationStats(us)
- case fieldAbt:
- xfss.AllocationBTree, err = btreeStats(us)
- case fieldBlkMap:
- xfss.BlockMapping, err = blockMappingStats(us)
- case fieldBmbt:
- xfss.BlockMapBTree, err = btreeStats(us)
- case fieldDir:
- xfss.DirectoryOperation, err = directoryOperationStats(us)
- case fieldTrans:
- xfss.Transaction, err = transactionStats(us)
- case fieldIg:
- xfss.InodeOperation, err = inodeOperationStats(us)
- case fieldLog:
- xfss.LogOperation, err = logOperationStats(us)
- case fieldRw:
- xfss.ReadWrite, err = readWriteStats(us)
- case fieldAttr:
- xfss.AttributeOperation, err = attributeOperationStats(us)
- case fieldIcluster:
- xfss.InodeClustering, err = inodeClusteringStats(us)
- case fieldVnodes:
- xfss.Vnode, err = vnodeStats(us)
- case fieldBuf:
- xfss.Buffer, err = bufferStats(us)
- }
- if err != nil {
- return nil, err
- }
- }
-
- return &xfss, s.Err()
-}
-
-// extentAllocationStats builds an ExtentAllocationStats from a slice of uint32s.
-func extentAllocationStats(us []uint32) (ExtentAllocationStats, error) {
- if l := len(us); l != 4 {
- return ExtentAllocationStats{}, fmt.Errorf("incorrect number of values for XFS extent allocation stats: %d", l)
- }
-
- return ExtentAllocationStats{
- ExtentsAllocated: us[0],
- BlocksAllocated: us[1],
- ExtentsFreed: us[2],
- BlocksFreed: us[3],
- }, nil
-}
-
-// btreeStats builds a BTreeStats from a slice of uint32s.
-func btreeStats(us []uint32) (BTreeStats, error) {
- if l := len(us); l != 4 {
- return BTreeStats{}, fmt.Errorf("incorrect number of values for XFS btree stats: %d", l)
- }
-
- return BTreeStats{
- Lookups: us[0],
- Compares: us[1],
- RecordsInserted: us[2],
- RecordsDeleted: us[3],
- }, nil
-}
-
-// BlockMappingStat builds a BlockMappingStats from a slice of uint32s.
-func blockMappingStats(us []uint32) (BlockMappingStats, error) {
- if l := len(us); l != 7 {
- return BlockMappingStats{}, fmt.Errorf("incorrect number of values for XFS block mapping stats: %d", l)
- }
-
- return BlockMappingStats{
- Reads: us[0],
- Writes: us[1],
- Unmaps: us[2],
- ExtentListInsertions: us[3],
- ExtentListDeletions: us[4],
- ExtentListLookups: us[5],
- ExtentListCompares: us[6],
- }, nil
-}
-
-// DirectoryOperationStats builds a DirectoryOperationStats from a slice of uint32s.
-func directoryOperationStats(us []uint32) (DirectoryOperationStats, error) {
- if l := len(us); l != 4 {
- return DirectoryOperationStats{}, fmt.Errorf("incorrect number of values for XFS directory operation stats: %d", l)
- }
-
- return DirectoryOperationStats{
- Lookups: us[0],
- Creates: us[1],
- Removes: us[2],
- Getdents: us[3],
- }, nil
-}
-
-// TransactionStats builds a TransactionStats from a slice of uint32s.
-func transactionStats(us []uint32) (TransactionStats, error) {
- if l := len(us); l != 3 {
- return TransactionStats{}, fmt.Errorf("incorrect number of values for XFS transaction stats: %d", l)
- }
-
- return TransactionStats{
- Sync: us[0],
- Async: us[1],
- Empty: us[2],
- }, nil
-}
-
-// InodeOperationStats builds an InodeOperationStats from a slice of uint32s.
-func inodeOperationStats(us []uint32) (InodeOperationStats, error) {
- if l := len(us); l != 7 {
- return InodeOperationStats{}, fmt.Errorf("incorrect number of values for XFS inode operation stats: %d", l)
- }
-
- return InodeOperationStats{
- Attempts: us[0],
- Found: us[1],
- Recycle: us[2],
- Missed: us[3],
- Duplicate: us[4],
- Reclaims: us[5],
- AttributeChange: us[6],
- }, nil
-}
-
-// LogOperationStats builds a LogOperationStats from a slice of uint32s.
-func logOperationStats(us []uint32) (LogOperationStats, error) {
- if l := len(us); l != 5 {
- return LogOperationStats{}, fmt.Errorf("incorrect number of values for XFS log operation stats: %d", l)
- }
-
- return LogOperationStats{
- Writes: us[0],
- Blocks: us[1],
- NoInternalBuffers: us[2],
- Force: us[3],
- ForceSleep: us[4],
- }, nil
-}
-
-// ReadWriteStats builds a ReadWriteStats from a slice of uint32s.
-func readWriteStats(us []uint32) (ReadWriteStats, error) {
- if l := len(us); l != 2 {
- return ReadWriteStats{}, fmt.Errorf("incorrect number of values for XFS read write stats: %d", l)
- }
-
- return ReadWriteStats{
- Read: us[0],
- Write: us[1],
- }, nil
-}
-
-// AttributeOperationStats builds an AttributeOperationStats from a slice of uint32s.
-func attributeOperationStats(us []uint32) (AttributeOperationStats, error) {
- if l := len(us); l != 4 {
- return AttributeOperationStats{}, fmt.Errorf("incorrect number of values for XFS attribute operation stats: %d", l)
- }
-
- return AttributeOperationStats{
- Get: us[0],
- Set: us[1],
- Remove: us[2],
- List: us[3],
- }, nil
-}
-
-// InodeClusteringStats builds an InodeClusteringStats from a slice of uint32s.
-func inodeClusteringStats(us []uint32) (InodeClusteringStats, error) {
- if l := len(us); l != 3 {
- return InodeClusteringStats{}, fmt.Errorf("incorrect number of values for XFS inode clustering stats: %d", l)
- }
-
- return InodeClusteringStats{
- Iflush: us[0],
- Flush: us[1],
- FlushInode: us[2],
- }, nil
-}
-
-// VnodeStats builds a VnodeStats from a slice of uint32s.
-func vnodeStats(us []uint32) (VnodeStats, error) {
- // The attribute "Free" appears to not be available on older XFS
- // stats versions. Therefore, 7 or 8 elements may appear in
- // this slice.
- l := len(us)
- if l != 7 && l != 8 {
- return VnodeStats{}, fmt.Errorf("incorrect number of values for XFS vnode stats: %d", l)
- }
-
- s := VnodeStats{
- Active: us[0],
- Allocate: us[1],
- Get: us[2],
- Hold: us[3],
- Release: us[4],
- Reclaim: us[5],
- Remove: us[6],
- }
-
- // Skip adding free, unless it is present. The zero value will
- // be used in place of an actual count.
- if l == 7 {
- return s, nil
- }
-
- s.Free = us[7]
- return s, nil
-}
-
-// BufferStats builds a BufferStats from a slice of uint32s.
-func bufferStats(us []uint32) (BufferStats, error) {
- if l := len(us); l != 9 {
- return BufferStats{}, fmt.Errorf("incorrect number of values for XFS buffer stats: %d", l)
- }
-
- return BufferStats{
- Get: us[0],
- Create: us[1],
- GetLocked: us[2],
- GetLockedWaited: us[3],
- BusyLocked: us[4],
- MissLocked: us[5],
- PageRetries: us[6],
- PageFound: us[7],
- GetRead: us[8],
- }, nil
-}
-
-// ExtendedPrecisionStats builds an ExtendedPrecisionStats from a slice of uint32s.
-func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) {
- if l := len(us); l != 3 {
- return ExtendedPrecisionStats{}, fmt.Errorf("incorrect number of values for XFS extended precision stats: %d", l)
- }
-
- return ExtendedPrecisionStats{
- FlushBytes: us[0],
- WriteBytes: us[1],
- ReadBytes: us[2],
- }, nil
-}
diff --git a/vendor/github.com/prometheus/procfs/xfs/xfs.go b/vendor/github.com/prometheus/procfs/xfs/xfs.go
deleted file mode 100644
index d86794b7c..000000000
--- a/vendor/github.com/prometheus/procfs/xfs/xfs.go
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package xfs provides access to statistics exposed by the XFS filesystem.
-package xfs
-
-// Stats contains XFS filesystem runtime statistics, parsed from
-// /proc/fs/xfs/stat.
-//
-// The names and meanings of each statistic were taken from
-// http://xfs.org/index.php/Runtime_Stats and xfs_stats.h in the Linux
-// kernel source. Most counters are uint32s (same data types used in
-// xfs_stats.h), but some of the "extended precision stats" are uint64s.
-type Stats struct {
- // The name of the filesystem used to source these statistics.
- // If empty, this indicates aggregated statistics for all XFS
- // filesystems on the host.
- Name string
-
- ExtentAllocation ExtentAllocationStats
- AllocationBTree BTreeStats
- BlockMapping BlockMappingStats
- BlockMapBTree BTreeStats
- DirectoryOperation DirectoryOperationStats
- Transaction TransactionStats
- InodeOperation InodeOperationStats
- LogOperation LogOperationStats
- ReadWrite ReadWriteStats
- AttributeOperation AttributeOperationStats
- InodeClustering InodeClusteringStats
- Vnode VnodeStats
- Buffer BufferStats
- ExtendedPrecision ExtendedPrecisionStats
-}
-
-// ExtentAllocationStats contains statistics regarding XFS extent allocations.
-type ExtentAllocationStats struct {
- ExtentsAllocated uint32
- BlocksAllocated uint32
- ExtentsFreed uint32
- BlocksFreed uint32
-}
-
-// BTreeStats contains statistics regarding an XFS internal B-tree.
-type BTreeStats struct {
- Lookups uint32
- Compares uint32
- RecordsInserted uint32
- RecordsDeleted uint32
-}
-
-// BlockMappingStats contains statistics regarding XFS block maps.
-type BlockMappingStats struct {
- Reads uint32
- Writes uint32
- Unmaps uint32
- ExtentListInsertions uint32
- ExtentListDeletions uint32
- ExtentListLookups uint32
- ExtentListCompares uint32
-}
-
-// DirectoryOperationStats contains statistics regarding XFS directory entries.
-type DirectoryOperationStats struct {
- Lookups uint32
- Creates uint32
- Removes uint32
- Getdents uint32
-}
-
-// TransactionStats contains statistics regarding XFS metadata transactions.
-type TransactionStats struct {
- Sync uint32
- Async uint32
- Empty uint32
-}
-
-// InodeOperationStats contains statistics regarding XFS inode operations.
-type InodeOperationStats struct {
- Attempts uint32
- Found uint32
- Recycle uint32
- Missed uint32
- Duplicate uint32
- Reclaims uint32
- AttributeChange uint32
-}
-
-// LogOperationStats contains statistics regarding the XFS log buffer.
-type LogOperationStats struct {
- Writes uint32
- Blocks uint32
- NoInternalBuffers uint32
- Force uint32
- ForceSleep uint32
-}
-
-// ReadWriteStats contains statistics regarding the number of read and write
-// system calls for XFS filesystems.
-type ReadWriteStats struct {
- Read uint32
- Write uint32
-}
-
-// AttributeOperationStats contains statistics regarding manipulation of
-// XFS extended file attributes.
-type AttributeOperationStats struct {
- Get uint32
- Set uint32
- Remove uint32
- List uint32
-}
-
-// InodeClusteringStats contains statistics regarding XFS inode clustering
-// operations.
-type InodeClusteringStats struct {
- Iflush uint32
- Flush uint32
- FlushInode uint32
-}
-
-// VnodeStats contains statistics regarding XFS vnode operations.
-type VnodeStats struct {
- Active uint32
- Allocate uint32
- Get uint32
- Hold uint32
- Release uint32
- Reclaim uint32
- Remove uint32
- Free uint32
-}
-
-// BufferStats contains statistics regarding XFS read/write I/O buffers.
-type BufferStats struct {
- Get uint32
- Create uint32
- GetLocked uint32
- GetLockedWaited uint32
- BusyLocked uint32
- MissLocked uint32
- PageRetries uint32
- PageFound uint32
- GetRead uint32
-}
-
-// ExtendedPrecisionStats contains high precision counters used to track the
-// total number of bytes read, written, or flushed, during XFS operations.
-type ExtendedPrecisionStats struct {
- FlushBytes uint64
- WriteBytes uint64
- ReadBytes uint64
-}
diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go
new file mode 100644
index 000000000..e941503d5
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/zoneinfo.go
@@ -0,0 +1,196 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows
+
+package procfs
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "regexp"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// Zoneinfo holds info parsed from /proc/zoneinfo.
+type Zoneinfo struct {
+ Node string
+ Zone string
+ NrFreePages *int64
+ Min *int64
+ Low *int64
+ High *int64
+ Scanned *int64
+ Spanned *int64
+ Present *int64
+ Managed *int64
+ NrActiveAnon *int64
+ NrInactiveAnon *int64
+ NrIsolatedAnon *int64
+ NrAnonPages *int64
+ NrAnonTransparentHugepages *int64
+ NrActiveFile *int64
+ NrInactiveFile *int64
+ NrIsolatedFile *int64
+ NrFilePages *int64
+ NrSlabReclaimable *int64
+ NrSlabUnreclaimable *int64
+ NrMlockStack *int64
+ NrKernelStack *int64
+ NrMapped *int64
+ NrDirty *int64
+ NrWriteback *int64
+ NrUnevictable *int64
+ NrShmem *int64
+ NrDirtied *int64
+ NrWritten *int64
+ NumaHit *int64
+ NumaMiss *int64
+ NumaForeign *int64
+ NumaInterleave *int64
+ NumaLocal *int64
+ NumaOther *int64
+ Protection []*int64
+}
+
+var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`)
+
+// Zoneinfo parses a zoneinfo file (/proc/zoneinfo) and returns a slice of
+// structs containing the relevant info. More information is available here:
+// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
+func (fs FS) Zoneinfo() ([]Zoneinfo, error) {
+ data, err := ioutil.ReadFile(fs.proc.Path("zoneinfo"))
+ if err != nil {
+ return nil, fmt.Errorf("error reading zoneinfo %s: %s", fs.proc.Path("zoneinfo"), err)
+ }
+ zoneinfo, err := parseZoneinfo(data)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing zoneinfo %s: %s", fs.proc.Path("zoneinfo"), err)
+ }
+ return zoneinfo, nil
+}
+
+func parseZoneinfo(zoneinfoData []byte) ([]Zoneinfo, error) {
+
+ zoneinfo := []Zoneinfo{}
+
+ zoneinfoBlocks := bytes.Split(zoneinfoData, []byte("\nNode"))
+ for _, block := range zoneinfoBlocks {
+ var zoneinfoElement Zoneinfo
+ lines := strings.Split(string(block), "\n")
+ for _, line := range lines {
+
+ if nodeZone := nodeZoneRE.FindStringSubmatch(line); nodeZone != nil {
+ zoneinfoElement.Node = nodeZone[1]
+ zoneinfoElement.Zone = nodeZone[2]
+ continue
+ }
+ if strings.HasPrefix(strings.TrimSpace(line), "per-node stats") {
+ zoneinfoElement.Zone = ""
+ continue
+ }
+ parts := strings.Fields(strings.TrimSpace(line))
+ if len(parts) < 2 {
+ continue
+ }
+ vp := util.NewValueParser(parts[1])
+ switch parts[0] {
+ case "nr_free_pages":
+ zoneinfoElement.NrFreePages = vp.PInt64()
+ case "min":
+ zoneinfoElement.Min = vp.PInt64()
+ case "low":
+ zoneinfoElement.Low = vp.PInt64()
+ case "high":
+ zoneinfoElement.High = vp.PInt64()
+ case "scanned":
+ zoneinfoElement.Scanned = vp.PInt64()
+ case "spanned":
+ zoneinfoElement.Spanned = vp.PInt64()
+ case "present":
+ zoneinfoElement.Present = vp.PInt64()
+ case "managed":
+ zoneinfoElement.Managed = vp.PInt64()
+ case "nr_active_anon":
+ zoneinfoElement.NrActiveAnon = vp.PInt64()
+ case "nr_inactive_anon":
+ zoneinfoElement.NrInactiveAnon = vp.PInt64()
+ case "nr_isolated_anon":
+ zoneinfoElement.NrIsolatedAnon = vp.PInt64()
+ case "nr_anon_pages":
+ zoneinfoElement.NrAnonPages = vp.PInt64()
+ case "nr_anon_transparent_hugepages":
+ zoneinfoElement.NrAnonTransparentHugepages = vp.PInt64()
+ case "nr_active_file":
+ zoneinfoElement.NrActiveFile = vp.PInt64()
+ case "nr_inactive_file":
+ zoneinfoElement.NrInactiveFile = vp.PInt64()
+ case "nr_isolated_file":
+ zoneinfoElement.NrIsolatedFile = vp.PInt64()
+ case "nr_file_pages":
+ zoneinfoElement.NrFilePages = vp.PInt64()
+ case "nr_slab_reclaimable":
+ zoneinfoElement.NrSlabReclaimable = vp.PInt64()
+ case "nr_slab_unreclaimable":
+ zoneinfoElement.NrSlabUnreclaimable = vp.PInt64()
+ case "nr_mlock_stack":
+ zoneinfoElement.NrMlockStack = vp.PInt64()
+ case "nr_kernel_stack":
+ zoneinfoElement.NrKernelStack = vp.PInt64()
+ case "nr_mapped":
+ zoneinfoElement.NrMapped = vp.PInt64()
+ case "nr_dirty":
+ zoneinfoElement.NrDirty = vp.PInt64()
+ case "nr_writeback":
+ zoneinfoElement.NrWriteback = vp.PInt64()
+ case "nr_unevictable":
+ zoneinfoElement.NrUnevictable = vp.PInt64()
+ case "nr_shmem":
+ zoneinfoElement.NrShmem = vp.PInt64()
+ case "nr_dirtied":
+ zoneinfoElement.NrDirtied = vp.PInt64()
+ case "nr_written":
+ zoneinfoElement.NrWritten = vp.PInt64()
+ case "numa_hit":
+ zoneinfoElement.NumaHit = vp.PInt64()
+ case "numa_miss":
+ zoneinfoElement.NumaMiss = vp.PInt64()
+ case "numa_foreign":
+ zoneinfoElement.NumaForeign = vp.PInt64()
+ case "numa_interleave":
+ zoneinfoElement.NumaInterleave = vp.PInt64()
+ case "numa_local":
+ zoneinfoElement.NumaLocal = vp.PInt64()
+ case "numa_other":
+ zoneinfoElement.NumaOther = vp.PInt64()
+ case "protection:":
+ protectionParts := strings.Split(line, ":")
+ protectionValues := strings.Replace(protectionParts[1], "(", "", 1)
+ protectionValues = strings.Replace(protectionValues, ")", "", 1)
+ protectionValues = strings.TrimSpace(protectionValues)
+ protectionStringMap := strings.Split(protectionValues, ", ")
+ val, err := util.ParsePInt64s(protectionStringMap)
+ if err == nil {
+ zoneinfoElement.Protection = val
+ }
+ }
+
+ }
+
+ zoneinfo = append(zoneinfo, zoneinfoElement)
+ }
+ return zoneinfo, nil
+}
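A sketch of iterating the parsed zoneinfo blocks:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	zones, err := fs.Zoneinfo()
	if err != nil {
		log.Fatal(err)
	}
	for _, z := range zones {
		if z.NrFreePages == nil {
			continue
		}
		fmt.Printf("node %s zone %-8s free pages: %d\n", z.Node, z.Zone, *z.NrFreePages)
	}
}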
diff --git a/vendor/google.golang.org/genproto/README.md b/vendor/google.golang.org/genproto/README.md
index 51d0bae5f..8b8e4ef8c 100644
--- a/vendor/google.golang.org/genproto/README.md
+++ b/vendor/google.golang.org/genproto/README.md
@@ -1,9 +1,12 @@
Go generated proto packages
===========================
-[![Build Status](https://travis-ci.org/google/go-genproto.svg?branch=master)](https://travis-ci.org/google/go-genproto)
[![GoDoc](https://godoc.org/google.golang.org/genproto?status.svg)](https://godoc.org/google.golang.org/genproto)
+```
+go get google.golang.org/genproto/...
+```
+
> **IMPORTANT** This repository is currently experimental. The structure
> of the contained packages is subject to change. Please see the original
> source repositories (listed below) to find out the status of the each
diff --git a/vendor/google.golang.org/genproto/go.mod b/vendor/google.golang.org/genproto/go.mod
new file mode 100644
index 000000000..c947b4a5e
--- /dev/null
+++ b/vendor/google.golang.org/genproto/go.mod
@@ -0,0 +1,11 @@
+module google.golang.org/genproto
+
+go 1.11
+
+require (
+ github.com/golang/protobuf v1.3.3
+ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3
+ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135
+ google.golang.org/grpc v1.27.0
+ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc
+)
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
index ec26060bc..01db4cfd0 100644
--- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
@@ -1,22 +1,15 @@
-// Code generated by protoc-gen-go.
+// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/rpc/status.proto
-// DO NOT EDIT!
-/*
-Package status is a generated protocol buffer package.
-
-It is generated from these files:
- google/rpc/status.proto
-
-It has these top-level messages:
- Status
-*/
package status
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import google_protobuf "github.com/golang/protobuf/ptypes/any"
+import (
+ fmt "fmt"
+ math "math"
+
+ proto "github.com/golang/protobuf/proto"
+ any "github.com/golang/protobuf/ptypes/any"
+)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@@ -27,76 +20,54 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-// The `Status` type defines a logical error model that is suitable for different
-// programming environments, including REST APIs and RPC APIs. It is used by
-// [gRPC](https://github.com/grpc). The error model is designed to be:
-//
-// - Simple to use and understand for most users
-// - Flexible enough to meet unexpected needs
-//
-// # Overview
-//
-// The `Status` message contains three pieces of data: error code, error message,
-// and error details. The error code should be an enum value of
-// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes if needed. The
-// error message should be a developer-facing English message that helps
-// developers *understand* and *resolve* the error. If a localized user-facing
-// error message is needed, put the localized message in the error details or
-// localize it in the client. The optional error details may contain arbitrary
-// information about the error. There is a predefined set of error detail types
-// in the package `google.rpc` which can be used for common error conditions.
-//
-// # Language mapping
-//
-// The `Status` message is the logical representation of the error model, but it
-// is not necessarily the actual wire format. When the `Status` message is
-// exposed in different client libraries and different wire protocols, it can be
-// mapped differently. For example, it will likely be mapped to some exceptions
-// in Java, but more likely mapped to some error codes in C.
-//
-// # Other uses
-//
-// The error model and the `Status` message can be used in a variety of
-// environments, either with or without APIs, to provide a
-// consistent developer experience across different environments.
-//
-// Example uses of this error model include:
-//
-// - Partial errors. If a service needs to return partial errors to the client,
-// it may embed the `Status` in the normal response to indicate the partial
-// errors.
+// The `Status` type defines a logical error model that is suitable for
+// different programming environments, including REST APIs and RPC APIs. It is
+// used by [gRPC](https://github.com/grpc). Each `Status` message contains
+// three pieces of data: error code, error message, and error details.
//
-// - Workflow errors. A typical workflow has multiple steps. Each step may
-// have a `Status` message for error reporting purpose.
-//
-// - Batch operations. If a client uses batch request and batch response, the
-// `Status` message should be used directly inside batch response, one for
-// each error sub-response.
-//
-// - Asynchronous operations. If an API call embeds asynchronous operation
-// results in its response, the status of those operations should be
-// represented directly using the `Status` message.
-//
-// - Logging. If some API errors are stored in logs, the message `Status` could
-// be used directly after any stripping needed for security/privacy reasons.
+// You can find out more about this error model and how to work with it in the
+// [API Design Guide](https://cloud.google.com/apis/design/errors).
type Status struct {
// The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
- Code int32 `protobuf:"varint,1,opt,name=code" json:"code,omitempty"`
+ Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
// A developer-facing error message, which should be in English. Any
// user-facing error message should be localized and sent in the
// [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
- Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"`
- // A list of messages that carry the error details. There will be a
- // common set of message types for APIs to use.
- Details []*google_protobuf.Any `protobuf:"bytes,3,rep,name=details" json:"details,omitempty"`
+ Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+ // A list of messages that carry the error details. There is a common set of
+ // message types for APIs to use.
+ Details []*any.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Status) Reset() { *m = Status{} }
+func (m *Status) String() string { return proto.CompactTextString(m) }
+func (*Status) ProtoMessage() {}
+func (*Status) Descriptor() ([]byte, []int) {
+ return fileDescriptor_24d244abaf643bfe, []int{0}
+}
+
+func (m *Status) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Status.Unmarshal(m, b)
+}
+func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Status.Marshal(b, m, deterministic)
+}
+func (m *Status) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Status.Merge(m, src)
+}
+func (m *Status) XXX_Size() int {
+ return xxx_messageInfo_Status.Size(m)
+}
+func (m *Status) XXX_DiscardUnknown() {
+ xxx_messageInfo_Status.DiscardUnknown(m)
}
-func (m *Status) Reset() { *m = Status{} }
-func (m *Status) String() string { return proto.CompactTextString(m) }
-func (*Status) ProtoMessage() {}
-func (*Status) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+var xxx_messageInfo_Status proto.InternalMessageInfo
func (m *Status) GetCode() int32 {
if m != nil {
@@ -112,7 +83,7 @@ func (m *Status) GetMessage() string {
return ""
}
-func (m *Status) GetDetails() []*google_protobuf.Any {
+func (m *Status) GetDetails() []*any.Any {
if m != nil {
return m.Details
}
@@ -123,10 +94,10 @@ func init() {
proto.RegisterType((*Status)(nil), "google.rpc.Status")
}
-func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor0) }
+func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor_24d244abaf643bfe) }
-var fileDescriptor0 = []byte{
- // 209 bytes of a gzipped FileDescriptorProto
+var fileDescriptor_24d244abaf643bfe = []byte{
+ // 212 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28,
0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x48, 0xe8, 0x15, 0x15, 0x24, 0x4b, 0x49, 0x42, 0x15, 0x81,
@@ -135,10 +106,10 @@ var fileDescriptor0 = []byte{
0xc0, 0x6c, 0x21, 0x09, 0x2e, 0xf6, 0xdc, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, 0x09, 0x26, 0x05,
0x46, 0x0d, 0xce, 0x20, 0x18, 0x57, 0x48, 0x8f, 0x8b, 0x3d, 0x25, 0xb5, 0x24, 0x31, 0x33, 0xa7,
0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x6a, 0x21, 0xcc, 0x12, 0x3d, 0xc7,
- 0xbc, 0xca, 0x20, 0x98, 0x22, 0xa7, 0x38, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x3d, 0x84, 0xa3, 0x9c,
+ 0xbc, 0xca, 0x20, 0x98, 0x22, 0xa7, 0x44, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x3d, 0x84, 0xa3, 0x9c,
0xb8, 0x21, 0xf6, 0x06, 0x80, 0x94, 0x07, 0x30, 0x46, 0x99, 0x43, 0xa5, 0xd2, 0xf3, 0x73, 0x12,
0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0xd3, 0x53, 0xf3, 0xc0, 0x86, 0xe9, 0x43, 0xa4, 0x12,
- 0x0b, 0x32, 0x8b, 0x91, 0xfc, 0x69, 0x0d, 0xa1, 0x16, 0x31, 0x31, 0x07, 0x05, 0x38, 0x27, 0xb1,
- 0x81, 0x55, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xa4, 0x53, 0xf0, 0x7c, 0x10, 0x01, 0x00,
- 0x00,
+ 0x0b, 0x32, 0x8b, 0x91, 0xfc, 0x69, 0x0d, 0xa1, 0x7e, 0x30, 0x32, 0x2e, 0x62, 0x62, 0x0e, 0x0a,
+ 0x70, 0x4e, 0x62, 0x03, 0x2b, 0x36, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb9, 0x28, 0x45, 0xb1,
+ 0x13, 0x01, 0x00, 0x00,
}
diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go
new file mode 100644
index 000000000..68ffc6201
--- /dev/null
+++ b/vendor/google.golang.org/grpc/attributes/attributes.go
@@ -0,0 +1,70 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package attributes defines a generic key/value store used in various gRPC
+// components.
+//
+// All APIs in this package are EXPERIMENTAL.
+package attributes
+
+import "fmt"
+
+// Attributes is an immutable struct for storing and retrieving generic
+// key/value pairs. Keys must be hashable, and users should define their own
+// types for keys.
+type Attributes struct {
+ m map[interface{}]interface{}
+}
+
+// New returns a new Attributes containing all key/value pairs in kvs. If the
+// same key appears multiple times, the last value overwrites all previous
+// values for that key. Panics if len(kvs) is not even.
+func New(kvs ...interface{}) *Attributes {
+ if len(kvs)%2 != 0 {
+ panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs)))
+ }
+ a := &Attributes{m: make(map[interface{}]interface{}, len(kvs)/2)}
+ for i := 0; i < len(kvs)/2; i++ {
+ a.m[kvs[i*2]] = kvs[i*2+1]
+ }
+ return a
+}
+
+// WithValues returns a new Attributes containing all key/value pairs in a and
+// kvs. Panics if len(kvs) is not even. If the same key appears multiple
+// times, the last value overwrites all previous values for that key. To
+// remove an existing key, use a nil value.
+func (a *Attributes) WithValues(kvs ...interface{}) *Attributes {
+ if len(kvs)%2 != 0 {
+ panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs)))
+ }
+ n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+len(kvs)/2)}
+ for k, v := range a.m {
+ n.m[k] = v
+ }
+ for i := 0; i < len(kvs)/2; i++ {
+ n.m[kvs[i*2]] = kvs[i*2+1]
+ }
+ return n
+}
+
+// Value returns the value associated with these attributes for key, or nil if
+// no value is associated with key.
+func (a *Attributes) Value(key interface{}) interface{} {
+ return a.m[key]
+}
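A sketch of the new attributes API; the key type below is illustrative, since the package only requires keys to be hashable:

package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
)

// Use an unexported named type as the key to avoid collisions between packages.
type regionKey struct{}

func main() {
	a := attributes.New(regionKey{}, "us-east-1")
	// WithValues returns a new Attributes; the original is immutable.
	b := a.WithValues(regionKey{}, "eu-west-1")

	fmt.Println(a.Value(regionKey{})) // us-east-1
	fmt.Println(b.Value(regionKey{})) // eu-west-1
}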
diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go
index 97c6e2568..ff7c3ee6f 100644
--- a/vendor/google.golang.org/grpc/backoff.go
+++ b/vendor/google.golang.org/grpc/backoff.go
@@ -23,16 +23,36 @@ package grpc
import (
"time"
+
+ "google.golang.org/grpc/backoff"
)
// DefaultBackoffConfig uses values specified for backoff in
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+//
+// Deprecated: use ConnectParams instead. Will be supported throughout 1.x.
var DefaultBackoffConfig = BackoffConfig{
MaxDelay: 120 * time.Second,
}
// BackoffConfig defines the parameters for the default gRPC backoff strategy.
+//
+// Deprecated: use ConnectParams instead. Will be supported throughout 1.x.
type BackoffConfig struct {
// MaxDelay is the upper bound of backoff delay.
MaxDelay time.Duration
}
+
+// ConnectParams defines the parameters for connecting and retrying. Users are
+// encouraged to use this instead of the BackoffConfig type defined above. See
+// here for more details:
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+//
+// This API is EXPERIMENTAL.
+type ConnectParams struct {
+ // Backoff specifies the configuration options for connection backoff.
+ Backoff backoff.Config
+ // MinConnectTimeout is the minimum amount of time we are willing to give a
+ // connection to complete.
+ MinConnectTimeout time.Duration
+}
diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go
new file mode 100644
index 000000000..0787d0b50
--- /dev/null
+++ b/vendor/google.golang.org/grpc/backoff/backoff.go
@@ -0,0 +1,52 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package backoff provides configuration options for backoff.
+//
+// More details can be found at:
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+//
+// All APIs in this package are experimental.
+package backoff
+
+import "time"
+
+// Config defines the configuration options for backoff.
+type Config struct {
+ // BaseDelay is the amount of time to backoff after the first failure.
+ BaseDelay time.Duration
+ // Multiplier is the factor with which to multiply backoffs after a
+ // failed retry. Should ideally be greater than 1.
+ Multiplier float64
+ // Jitter is the factor with which backoffs are randomized.
+ Jitter float64
+ // MaxDelay is the upper bound of backoff delay.
+ MaxDelay time.Duration
+}
+
+// DefaultConfig is a backoff configuration with the default values specified
+// at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+//
+// This should be useful for callers who want to configure backoff with
+// non-default values only for a subset of the options.
+var DefaultConfig = Config{
+ BaseDelay: 1.0 * time.Second,
+ Multiplier: 1.6,
+ Jitter: 0.2,
+ MaxDelay: 120 * time.Second,
+}
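A sketch of wiring backoff.Config into ConnectParams; the grpc.WithConnectParams dial option is assumed from the rest of the gRPC module and is not part of this diff:

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
)

func main() {
	// Start from the documented defaults and only raise the delay cap.
	bc := backoff.DefaultConfig
	bc.MaxDelay = 30 * time.Second

	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff:           bc,
			MinConnectTimeout: 10 * time.Second,
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}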
diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go
index c266f4ec1..9258858ed 100644
--- a/vendor/google.golang.org/grpc/balancer/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/balancer.go
@@ -117,6 +117,15 @@ type NewSubConnOptions struct {
HealthCheckEnabled bool
}
+// State contains the balancer's state relevant to the gRPC ClientConn.
+type State struct {
+ // State contains the connectivity state of the balancer, which is used to
+ // determine the state of the ClientConn.
+ ConnectivityState connectivity.State
+ // Picker is used to choose connections (SubConns) for RPCs.
+ Picker V2Picker
+}
+
// ClientConn represents a gRPC ClientConn.
//
// This interface is to be implemented by gRPC. Users should not need a
@@ -137,10 +146,19 @@ type ClientConn interface {
//
// gRPC will update the connectivity state of the ClientConn, and will call pick
// on the new picker to pick new SubConn.
+ //
+ // Deprecated: use UpdateState instead
UpdateBalancerState(s connectivity.State, p Picker)
+ // UpdateState notifies gRPC that the balancer's internal state has
+ // changed.
+ //
+ // gRPC will update the connectivity state of the ClientConn, and will call pick
+ // on the new picker to pick new SubConns.
+ UpdateState(State)
+
// ResolveNow is called by balancer to notify gRPC to do a name resolving.
- ResolveNow(resolver.ResolveNowOption)
+ ResolveNow(resolver.ResolveNowOptions)
// Target returns the dial target for this ClientConn.
//
@@ -185,11 +203,14 @@ type ConfigParser interface {
ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error)
}
-// PickOptions contains addition information for the Pick operation.
-type PickOptions struct {
+// PickInfo contains additional information for the Pick operation.
+type PickInfo struct {
// FullMethodName is the method name that NewClientStream() is called
// with. The canonical format is /service/Method.
FullMethodName string
+ // Ctx is the RPC's context, and may contain relevant RPC-level information
+ // like the outgoing header metadata.
+ Ctx context.Context
}
// DoneInfo contains additional information for done.
@@ -215,7 +236,7 @@ var (
ErrNoSubConnAvailable = errors.New("no SubConn is available")
// ErrTransientFailure indicates all SubConns are in TransientFailure.
// WaitForReady RPCs will block, non-WaitForReady RPCs will fail.
- ErrTransientFailure = errors.New("all SubConns are in TransientFailure")
+ ErrTransientFailure = TransientFailureError(errors.New("all SubConns are in TransientFailure"))
)
// Picker is used by gRPC to pick a SubConn to send an RPC.
@@ -223,6 +244,8 @@ var (
// internal state has changed.
//
// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().
+//
+// Deprecated: use V2Picker instead
type Picker interface {
// Pick returns the SubConn to be used to send the RPC.
// The returned SubConn must be one returned by NewSubConn().
@@ -243,18 +266,76 @@ type Picker interface {
//
// If the returned error is not nil:
// - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState()
- // - If the error is ErrTransientFailure:
+ // - If the error is ErrTransientFailure or implements IsTransientFailure()
+ // bool, returning true:
// - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState()
// is called to pick again;
// - Otherwise, RPC will fail with unavailable error.
// - Else (error is other non-nil error):
- // - The RPC will fail with unavailable error.
+ // - The RPC will fail with the error's status code, or Unknown if it is
+ // not a status error.
//
// The returned done() function will be called once the rpc has finished,
// with the final status of that RPC. If the SubConn returned is not a
// valid SubConn type, done may not be called. done may be nil if balancer
// doesn't care about the RPC status.
- Pick(ctx context.Context, opts PickOptions) (conn SubConn, done func(DoneInfo), err error)
+ Pick(ctx context.Context, info PickInfo) (conn SubConn, done func(DoneInfo), err error)
+}
+
+// PickResult contains information related to a connection chosen for an RPC.
+type PickResult struct {
+ // SubConn is the connection to use for this pick, if its state is Ready.
+ // If the state is not Ready, gRPC will block the RPC until a new Picker is
+ // provided by the balancer (using ClientConn.UpdateState). The SubConn
+ // must be one returned by ClientConn.NewSubConn.
+ SubConn SubConn
+
+ // Done is called when the RPC is completed. If the SubConn is not ready,
+ // this will be called with a nil parameter. If the SubConn is not a valid
+ // type, Done may not be called. May be nil if the balancer does not wish
+ // to be notified when the RPC completes.
+ Done func(DoneInfo)
+}
+
+type transientFailureError struct {
+ error
+}
+
+func (e *transientFailureError) IsTransientFailure() bool { return true }
+
+// TransientFailureError wraps err in an error implementing
+// IsTransientFailure() bool, returning true.
+func TransientFailureError(err error) error {
+ return &transientFailureError{error: err}
+}
+
+// V2Picker is used by gRPC to pick a SubConn to send an RPC.
+// Balancer is expected to generate a new picker from its snapshot every time its
+// internal state has changed.
+//
+// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().
+type V2Picker interface {
+ // Pick returns the connection to use for this RPC and related information.
+ //
+ // Pick should not block. If the balancer needs to do I/O or any blocking
+ // or time-consuming work to service this call, it should return
+ // ErrNoSubConnAvailable, and the Pick call will be repeated by gRPC when
+ // the Picker is updated (using ClientConn.UpdateState).
+ //
+ // If an error is returned:
+ //
+ // - If the error is ErrNoSubConnAvailable, gRPC will block until a new
+ // Picker is provided by the balancer (using ClientConn.UpdateState).
+ //
+ // - If the error implements IsTransientFailure() bool, returning true,
+ // wait for ready RPCs will wait, but non-wait for ready RPCs will be
+ // terminated with this error's Error() string and status code
+ // Unavailable.
+ //
+ // - Any other errors terminate all RPCs with the code and message
+ // provided. If the error is not a status error, it will be converted by
+ // gRPC to a status error with code Unknown.
+ Pick(info PickInfo) (PickResult, error)
}
// Balancer takes input from gRPC, manages SubConns, and collects and aggregates
@@ -292,8 +373,11 @@ type Balancer interface {
// SubConnState describes the state of a SubConn.
type SubConnState struct {
+ // ConnectivityState is the connectivity state of the SubConn.
ConnectivityState connectivity.State
- // TODO: add last connection error
+ // ConnectionError is set if the ConnectivityState is TransientFailure,
+ // describing the reason the SubConn failed. Otherwise, it is nil.
+ ConnectionError error
}
// ClientConnState describes the state of a ClientConn relevant to the
@@ -305,14 +389,23 @@ type ClientConnState struct {
BalancerConfig serviceconfig.LoadBalancingConfig
}
+// ErrBadResolverState may be returned by UpdateClientConnState to indicate a
+// problem with the provided name resolver data.
+var ErrBadResolverState = errors.New("bad resolver state")
+
// V2Balancer is defined for documentation purposes. If a Balancer also
// implements V2Balancer, its UpdateClientConnState method will be called
// instead of HandleResolvedAddrs and its UpdateSubConnState will be called
// instead of HandleSubConnStateChange.
type V2Balancer interface {
// UpdateClientConnState is called by gRPC when the state of the ClientConn
- // changes.
- UpdateClientConnState(ClientConnState)
+ // changes. If the error returned is ErrBadResolverState, the ClientConn
+ // will begin calling ResolveNow on the active name resolver with
+ // exponential backoff until a subsequent call to UpdateClientConnState
+ // returns a nil error. Any other errors are currently ignored.
+ UpdateClientConnState(ClientConnState) error
+ // ResolverError is called by gRPC when the name resolver reports an error.
+ ResolverError(error)
// UpdateSubConnState is called by gRPC when the state of a SubConn
// changes.
UpdateSubConnState(SubConn, SubConnState)
@@ -326,9 +419,8 @@ type V2Balancer interface {
//
// It's not thread safe.
type ConnectivityStateEvaluator struct {
- numReady uint64 // Number of addrConns in ready state.
- numConnecting uint64 // Number of addrConns in connecting state.
- numTransientFailure uint64 // Number of addrConns in transientFailure.
+ numReady uint64 // Number of addrConns in ready state.
+ numConnecting uint64 // Number of addrConns in connecting state.
}
// RecordTransition records state change happening in subConn and based on that
@@ -348,8 +440,6 @@ func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState conne
cse.numReady += updateVal
case connectivity.Connecting:
cse.numConnecting += updateVal
- case connectivity.TransientFailure:
- cse.numTransientFailure += updateVal
}
}
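The V2Picker contract introduced above is easiest to see in a tiny implementation. A minimal sketch follows; the firstPicker type is hypothetical and not part of the package:

package example

import "google.golang.org/grpc/balancer"

// firstPicker always picks the first ready SubConn it was built with.
type firstPicker struct {
	scs []balancer.SubConn
}

func (p *firstPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
	if len(p.scs) == 0 {
		// Ask gRPC to block the RPC until the balancer supplies a new picker.
		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
	}
	return balancer.PickResult{SubConn: p.scs[0]}, nil
}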
diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go
index 1af88f0a3..d7d72918a 100644
--- a/vendor/google.golang.org/grpc/balancer/base/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go
@@ -20,6 +20,8 @@ package base
import (
"context"
+ "errors"
+ "fmt"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/connectivity"
@@ -28,34 +30,44 @@ import (
)
type baseBuilder struct {
- name string
- pickerBuilder PickerBuilder
- config Config
+ name string
+ pickerBuilder PickerBuilder
+ v2PickerBuilder V2PickerBuilder
+ config Config
}
func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
- return &baseBalancer{
- cc: cc,
- pickerBuilder: bb.pickerBuilder,
+ bal := &baseBalancer{
+ cc: cc,
+ pickerBuilder: bb.pickerBuilder,
+ v2PickerBuilder: bb.v2PickerBuilder,
subConns: make(map[resolver.Address]balancer.SubConn),
scStates: make(map[balancer.SubConn]connectivity.State),
csEvltr: &balancer.ConnectivityStateEvaluator{},
- // Initialize picker to a picker that always return
- // ErrNoSubConnAvailable, because when state of a SubConn changes, we
- // may call UpdateBalancerState with this picker.
- picker: NewErrPicker(balancer.ErrNoSubConnAvailable),
- config: bb.config,
+ config: bb.config,
}
+ // Initialize picker to a picker that always returns
+ // ErrNoSubConnAvailable, because when state of a SubConn changes, we
+ // may call UpdateState with this picker.
+ if bb.pickerBuilder != nil {
+ bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable)
+ } else {
+ bal.v2Picker = NewErrPickerV2(balancer.ErrNoSubConnAvailable)
+ }
+ return bal
}
func (bb *baseBuilder) Name() string {
return bb.name
}
+var _ balancer.V2Balancer = (*baseBalancer)(nil) // Assert that we implement V2Balancer
+
type baseBalancer struct {
- cc balancer.ClientConn
- pickerBuilder PickerBuilder
+ cc balancer.ClientConn
+ pickerBuilder PickerBuilder
+ v2PickerBuilder V2PickerBuilder
csEvltr *balancer.ConnectivityStateEvaluator
state connectivity.State
@@ -63,19 +75,50 @@ type baseBalancer struct {
subConns map[resolver.Address]balancer.SubConn
scStates map[balancer.SubConn]connectivity.State
picker balancer.Picker
+ v2Picker balancer.V2Picker
config Config
+
+ resolverErr error // the last error reported by the resolver; cleared on successful resolution
+ connErr error // the last connection error; cleared upon leaving TransientFailure
}
func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
panic("not implemented")
}
-func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) {
+func (b *baseBalancer) ResolverError(err error) {
+ b.resolverErr = err
+ if len(b.subConns) == 0 {
+ b.state = connectivity.TransientFailure
+ }
+ if b.state != connectivity.TransientFailure {
+ // The picker will not change since the balancer does not currently
+ // report an error.
+ return
+ }
+ b.regeneratePicker()
+ if b.picker != nil {
+ b.cc.UpdateBalancerState(b.state, b.picker)
+ } else {
+ b.cc.UpdateState(balancer.State{
+ ConnectivityState: b.state,
+ Picker: b.v2Picker,
+ })
+ }
+}
+
+func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
// TODO: handle s.ResolverState.Err (log if not nil) once implemented.
// TODO: handle s.ResolverState.ServiceConfig?
if grpclog.V(2) {
grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s)
}
+ if len(s.ResolverState.Addresses) == 0 {
+ b.ResolverError(errors.New("produced zero addresses"))
+ return balancer.ErrBadResolverState
+ }
+ // Successful resolution; clear resolver error and ensure we return nil.
+ b.resolverErr = nil
// addrsSet is the set converted from addrs, it's used for quick lookup of an address.
addrsSet := make(map[resolver.Address]struct{})
for _, a := range s.ResolverState.Addresses {
@@ -101,26 +144,57 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) {
// The entry will be deleted in HandleSubConnStateChange.
}
}
+ return nil
+}
+
+// mergeErrors builds an error from the last connection error and the last
+// resolver error. Must only be called if b.state is TransientFailure.
+func (b *baseBalancer) mergeErrors() error {
+ // connErr must always be non-nil unless there are no SubConns, in which
+ // case resolverErr must be non-nil.
+ if b.connErr == nil {
+ return fmt.Errorf("last resolver error: %v", b.resolverErr)
+ }
+ if b.resolverErr == nil {
+ return fmt.Errorf("last connection error: %v", b.connErr)
+ }
+ return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr)
}
// regeneratePicker takes a snapshot of the balancer, and generates a picker
// from it. The picker is
-// - errPicker with ErrTransientFailure if the balancer is in TransientFailure,
+// - errPicker if the balancer is in TransientFailure,
// - built by the pickerBuilder with all READY SubConns otherwise.
func (b *baseBalancer) regeneratePicker() {
if b.state == connectivity.TransientFailure {
- b.picker = NewErrPicker(balancer.ErrTransientFailure)
+ if b.pickerBuilder != nil {
+ b.picker = NewErrPicker(balancer.ErrTransientFailure)
+ } else {
+ b.v2Picker = NewErrPickerV2(balancer.TransientFailureError(b.mergeErrors()))
+ }
return
}
- readySCs := make(map[resolver.Address]balancer.SubConn)
+ if b.pickerBuilder != nil {
+ readySCs := make(map[resolver.Address]balancer.SubConn)
- // Filter out all ready SCs from full subConn map.
- for addr, sc := range b.subConns {
- if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
- readySCs[addr] = sc
+ // Filter out all ready SCs from full subConn map.
+ for addr, sc := range b.subConns {
+ if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
+ readySCs[addr] = sc
+ }
}
+ b.picker = b.pickerBuilder.Build(readySCs)
+ } else {
+ readySCs := make(map[balancer.SubConn]SubConnInfo)
+
+ // Filter out all ready SCs from full subConn map.
+ for addr, sc := range b.subConns {
+ if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
+ readySCs[sc] = SubConnInfo{Address: addr}
+ }
+ }
+ b.v2Picker = b.v2PickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs})
}
- b.picker = b.pickerBuilder.Build(readySCs)
}
func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
@@ -152,6 +226,9 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su
oldAggrState := b.state
b.state = b.csEvltr.RecordTransition(oldS, s)
+ // Set or clear the last connection error accordingly.
+ b.connErr = state.ConnectionError
+
// Regenerate picker when one of the following happens:
// - this sc became ready from not-ready
// - this sc became not-ready from ready
@@ -162,7 +239,11 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su
b.regeneratePicker()
}
- b.cc.UpdateBalancerState(b.state, b.picker)
+ if b.picker != nil {
+ b.cc.UpdateBalancerState(b.state, b.picker)
+ } else {
+ b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.v2Picker})
+ }
}
// Close is a nop because base balancer doesn't have internal state to clean up,
@@ -179,6 +260,19 @@ type errPicker struct {
err error // Pick() always returns this err.
}
-func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+func (p *errPicker) Pick(context.Context, balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error) {
return nil, nil, p.err
}
+
+// NewErrPickerV2 returns a V2Picker that always returns err on Pick().
+func NewErrPickerV2(err error) balancer.V2Picker {
+ return &errPickerV2{err: err}
+}
+
+type errPickerV2 struct {
+ err error // Pick() always returns this err.
+}
+
+func (p *errPickerV2) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
+ return balancer.PickResult{}, p.err
+}
diff --git a/vendor/google.golang.org/grpc/balancer/base/base.go b/vendor/google.golang.org/grpc/balancer/base/base.go
index 34b1f2994..4192918b9 100644
--- a/vendor/google.golang.org/grpc/balancer/base/base.go
+++ b/vendor/google.golang.org/grpc/balancer/base/base.go
@@ -42,6 +42,26 @@ type PickerBuilder interface {
Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker
}
+// V2PickerBuilder creates balancer.V2Picker.
+type V2PickerBuilder interface {
+ // Build returns a picker that will be used by gRPC to pick a SubConn.
+ Build(info PickerBuildInfo) balancer.V2Picker
+}
+
+// PickerBuildInfo contains information needed by the picker builder to
+// construct a picker.
+type PickerBuildInfo struct {
+ // ReadySCs is a map from all ready SubConns to the Addresses used to
+ // create them.
+ ReadySCs map[balancer.SubConn]SubConnInfo
+}
+
+// SubConnInfo contains information about a SubConn created by the base
+// balancer.
+type SubConnInfo struct {
+ Address resolver.Address // the address used to create this SubConn
+}
+
// NewBalancerBuilder returns a balancer builder. The balancers
// built by this builder will use the picker builder to build pickers.
func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder {
@@ -62,3 +82,12 @@ func NewBalancerBuilderWithConfig(name string, pb PickerBuilder, config Config)
config: config,
}
}
+
+// NewBalancerBuilderV2 returns a base balancer builder configured by the provided config.
+func NewBalancerBuilderV2(name string, pb V2PickerBuilder, config Config) balancer.Builder {
+ return &baseBuilder{
+ name: name,
+ v2PickerBuilder: pb,
+ config: config,
+ }
+}
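NewBalancerBuilderV2 is the entry point for V2-style policies. A hedged sketch of wiring a custom V2PickerBuilder through it, mirroring what roundrobin does in the next file; the policy name and types are illustrative:

package example

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
)

type firstReadyPickerBuilder struct{}

func (*firstReadyPickerBuilder) Build(info base.PickerBuildInfo) balancer.V2Picker {
	if len(info.ReadySCs) == 0 {
		return base.NewErrPickerV2(balancer.ErrNoSubConnAvailable)
	}
	scs := make([]balancer.SubConn, 0, len(info.ReadySCs))
	for sc := range info.ReadySCs {
		scs = append(scs, sc)
	}
	return &firstPicker{scs: scs} // firstPicker as sketched earlier
}

func init() {
	// Register the policy so a service config can select it by name.
	balancer.Register(base.NewBalancerBuilderV2("first_ready", &firstReadyPickerBuilder{}, base.Config{HealthCheck: true}))
}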
diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
index 29f7a4ddd..d4d645501 100644
--- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
+++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
@@ -22,14 +22,12 @@
package roundrobin
import (
- "context"
"sync"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/balancer/base"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/grpcrand"
- "google.golang.org/grpc/resolver"
)
// Name is the name of round_robin balancer.
@@ -37,7 +35,7 @@ const Name = "round_robin"
// newBuilder creates a new roundrobin balancer builder.
func newBuilder() balancer.Builder {
- return base.NewBalancerBuilderWithConfig(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
+ return base.NewBalancerBuilderV2(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
}
func init() {
@@ -46,13 +44,13 @@ func init() {
type rrPickerBuilder struct{}
-func (*rrPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker {
- grpclog.Infof("roundrobinPicker: newPicker called with readySCs: %v", readySCs)
- if len(readySCs) == 0 {
- return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
+func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.V2Picker {
+ grpclog.Infof("roundrobinPicker: newPicker called with info: %v", info)
+ if len(info.ReadySCs) == 0 {
+ return base.NewErrPickerV2(balancer.ErrNoSubConnAvailable)
}
var scs []balancer.SubConn
- for _, sc := range readySCs {
+ for sc := range info.ReadySCs {
scs = append(scs, sc)
}
return &rrPicker{
@@ -74,10 +72,10 @@ type rrPicker struct {
next int
}
-func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
p.mu.Lock()
sc := p.subConns[p.next]
p.next = (p.next + 1) % len(p.subConns)
p.mu.Unlock()
- return sc, nil, nil
+ return balancer.PickResult{SubConn: sc}, nil
}
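For completeness, a client typically opts into this policy by name. A minimal sketch, assuming grpc.WithDefaultServiceConfig is available in this version and with an illustrative target:

package example

import "google.golang.org/grpc"

func dialRoundRobin(target string) (*grpc.ClientConn, error) {
	return grpc.Dial(target,
		grpc.WithInsecure(),
		grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"round_robin"}`),
	)
}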
diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
index 8df4095ca..824f28e74 100644
--- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
+++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
@@ -25,6 +25,8 @@ import (
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/internal/buffer"
+ "google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/resolver"
)
@@ -32,64 +34,17 @@ import (
type scStateUpdate struct {
sc balancer.SubConn
state connectivity.State
-}
-
-// scStateUpdateBuffer is an unbounded channel for scStateChangeTuple.
-// TODO make a general purpose buffer that uses interface{}.
-type scStateUpdateBuffer struct {
- c chan *scStateUpdate
- mu sync.Mutex
- backlog []*scStateUpdate
-}
-
-func newSCStateUpdateBuffer() *scStateUpdateBuffer {
- return &scStateUpdateBuffer{
- c: make(chan *scStateUpdate, 1),
- }
-}
-
-func (b *scStateUpdateBuffer) put(t *scStateUpdate) {
- b.mu.Lock()
- defer b.mu.Unlock()
- if len(b.backlog) == 0 {
- select {
- case b.c <- t:
- return
- default:
- }
- }
- b.backlog = append(b.backlog, t)
-}
-
-func (b *scStateUpdateBuffer) load() {
- b.mu.Lock()
- defer b.mu.Unlock()
- if len(b.backlog) > 0 {
- select {
- case b.c <- b.backlog[0]:
- b.backlog[0] = nil
- b.backlog = b.backlog[1:]
- default:
- }
- }
-}
-
-// get returns the channel that the scStateUpdate will be sent to.
-//
-// Upon receiving, the caller should call load to send another
-// scStateChangeTuple onto the channel if there is any.
-func (b *scStateUpdateBuffer) get() <-chan *scStateUpdate {
- return b.c
+ err error
}
// ccBalancerWrapper is a wrapper on top of cc for balancers.
// It implements balancer.ClientConn interface.
type ccBalancerWrapper struct {
- cc *ClientConn
- balancer balancer.Balancer
- stateChangeQueue *scStateUpdateBuffer
- ccUpdateCh chan *balancer.ClientConnState
- done chan struct{}
+ cc *ClientConn
+ balancerMu sync.Mutex // synchronizes calls to the balancer
+ balancer balancer.Balancer
+ scBuffer *buffer.Unbounded
+ done *grpcsync.Event
mu sync.Mutex
subConns map[*acBalancerWrapper]struct{}
@@ -97,11 +52,10 @@ type ccBalancerWrapper struct {
func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper {
ccb := &ccBalancerWrapper{
- cc: cc,
- stateChangeQueue: newSCStateUpdateBuffer(),
- ccUpdateCh: make(chan *balancer.ClientConnState, 1),
- done: make(chan struct{}),
- subConns: make(map[*acBalancerWrapper]struct{}),
+ cc: cc,
+ scBuffer: buffer.NewUnbounded(),
+ done: grpcsync.NewEvent(),
+ subConns: make(map[*acBalancerWrapper]struct{}),
}
go ccb.watcher()
ccb.balancer = b.Build(ccb, bopts)
@@ -113,36 +67,23 @@ func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.Bui
func (ccb *ccBalancerWrapper) watcher() {
for {
select {
- case t := <-ccb.stateChangeQueue.get():
- ccb.stateChangeQueue.load()
- select {
- case <-ccb.done:
- ccb.balancer.Close()
- return
- default:
+ case t := <-ccb.scBuffer.Get():
+ ccb.scBuffer.Load()
+ if ccb.done.HasFired() {
+ break
}
+ ccb.balancerMu.Lock()
+ su := t.(*scStateUpdate)
if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
- ub.UpdateSubConnState(t.sc, balancer.SubConnState{ConnectivityState: t.state})
+ ub.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err})
} else {
- ccb.balancer.HandleSubConnStateChange(t.sc, t.state)
+ ccb.balancer.HandleSubConnStateChange(su.sc, su.state)
}
- case s := <-ccb.ccUpdateCh:
- select {
- case <-ccb.done:
- ccb.balancer.Close()
- return
- default:
- }
- if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
- ub.UpdateClientConnState(*s)
- } else {
- ccb.balancer.HandleResolvedAddrs(s.ResolverState.Addresses, nil)
- }
- case <-ccb.done:
+ ccb.balancerMu.Unlock()
+ case <-ccb.done.Done():
}
- select {
- case <-ccb.done:
+ if ccb.done.HasFired() {
ccb.balancer.Close()
ccb.mu.Lock()
scs := ccb.subConns
@@ -151,19 +92,17 @@ func (ccb *ccBalancerWrapper) watcher() {
for acbw := range scs {
ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
}
- ccb.UpdateBalancerState(connectivity.Connecting, nil)
+ ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil})
return
- default:
}
- ccb.cc.firstResolveEvent.Fire()
}
}
func (ccb *ccBalancerWrapper) close() {
- close(ccb.done)
+ ccb.done.Fire()
}
-func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) {
// When updating addresses for a SubConn, if the address in use is not in
// the new addresses, the old ac will be tearDown() and a new ac will be
// created. tearDown() generates a state change with Shutdown state, we
@@ -174,30 +113,29 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co
if sc == nil {
return
}
- ccb.stateChangeQueue.put(&scStateUpdate{
+ ccb.scBuffer.Put(&scStateUpdate{
sc: sc,
state: s,
+ err: err,
})
}
-func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) {
- if ccb.cc.curBalancerName != grpclbName {
- // Filter any grpclb addresses since we don't have the grpclb balancer.
- s := &ccs.ResolverState
- for i := 0; i < len(s.Addresses); {
- if s.Addresses[i].Type == resolver.GRPCLB {
- copy(s.Addresses[i:], s.Addresses[i+1:])
- s.Addresses = s.Addresses[:len(s.Addresses)-1]
- continue
- }
- i++
- }
+func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
+ ccb.balancerMu.Lock()
+ defer ccb.balancerMu.Unlock()
+ if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
+ return ub.UpdateClientConnState(*ccs)
}
- select {
- case <-ccb.ccUpdateCh:
- default:
+ ccb.balancer.HandleResolvedAddrs(ccs.ResolverState.Addresses, nil)
+ return nil
+}
+
+func (ccb *ccBalancerWrapper) resolverError(err error) {
+ if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
+ ccb.balancerMu.Lock()
+ ub.ResolverError(err)
+ ccb.balancerMu.Unlock()
}
- ccb.ccUpdateCh <- ccs
}
func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
@@ -250,7 +188,22 @@ func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balanc
ccb.cc.csMgr.updateState(s)
}
-func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOption) {
+func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
+ ccb.mu.Lock()
+ defer ccb.mu.Unlock()
+ if ccb.subConns == nil {
+ return
+ }
+ // Update picker before updating state. Even though the ordering here does
+ // not matter, it can lead to multiple calls of Pick in the common start-up
+ // case where we wait for ready and then perform an RPC. If the picker is
+ // updated later, we could call the "connecting" picker when the state is
+ // updated, and then call the "ready" picker after the picker gets updated.
+ ccb.cc.blockingpicker.updatePickerV2(s.Picker)
+ ccb.cc.csMgr.updateState(s.ConnectivityState)
+}
+
+func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) {
ccb.cc.resolveNow(o)
}
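The new scBuffer relies on the unbounded-buffer pattern: Put never blocks, the consumer receives from Get, then calls Load to promote the next backlog item onto the channel. A generic sketch of that pattern (not the actual internal/buffer package) looks like this:

package example

import "sync"

// Unbounded is a never-blocking FIFO backed by a one-slot channel plus a backlog.
type Unbounded struct {
	c       chan interface{}
	mu      sync.Mutex
	backlog []interface{}
}

func NewUnbounded() *Unbounded { return &Unbounded{c: make(chan interface{}, 1)} }

// Put enqueues t without ever blocking the producer.
func (b *Unbounded) Put(t interface{}) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if len(b.backlog) == 0 {
		select {
		case b.c <- t:
			return
		default:
		}
	}
	b.backlog = append(b.backlog, t)
}

// Load moves the oldest backlog item onto the channel, if the channel is free.
func (b *Unbounded) Load() {
	b.mu.Lock()
	defer b.mu.Unlock()
	if len(b.backlog) > 0 {
		select {
		case b.c <- b.backlog[0]:
			b.backlog[0] = nil
			b.backlog = b.backlog[1:]
		default:
		}
	}
}

// Get returns the channel the consumer receives from; call Load after each receive.
func (b *Unbounded) Get() <-chan interface{} { return b.c }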
diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go
index 66e9a44ac..db04b08b8 100644
--- a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go
+++ b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go
@@ -19,7 +19,6 @@
package grpc
import (
- "context"
"sync"
"google.golang.org/grpc/balancer"
@@ -49,7 +48,7 @@ func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.B
csEvltr: &balancer.ConnectivityStateEvaluator{},
state: connectivity.Idle,
}
- cc.UpdateBalancerState(connectivity.Idle, bw)
+ cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: bw})
go bw.lbWatcher()
return bw
}
@@ -243,7 +242,7 @@ func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s conne
if bw.state != sa {
bw.state = sa
}
- bw.cc.UpdateBalancerState(bw.state, bw)
+ bw.cc.UpdateState(balancer.State{ConnectivityState: bw.state, Picker: bw})
if s == connectivity.Shutdown {
// Remove state for this sc.
delete(bw.connSt, sc)
@@ -275,17 +274,17 @@ func (bw *balancerWrapper) Close() {
// The picker is the balancerWrapper itself.
// It either blocks or returns error, consistent with v1 balancer Get().
-func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) (sc balancer.SubConn, done func(balancer.DoneInfo), err error) {
+func (bw *balancerWrapper) Pick(info balancer.PickInfo) (result balancer.PickResult, err error) {
failfast := true // Default failfast is true.
- if ss, ok := rpcInfoFromContext(ctx); ok {
+ if ss, ok := rpcInfoFromContext(info.Ctx); ok {
failfast = ss.failfast
}
- a, p, err := bw.balancer.Get(ctx, BalancerGetOptions{BlockingWait: !failfast})
+ a, p, err := bw.balancer.Get(info.Ctx, BalancerGetOptions{BlockingWait: !failfast})
if err != nil {
- return nil, nil, err
+ return balancer.PickResult{}, toRPCErr(err)
}
if p != nil {
- done = func(balancer.DoneInfo) { p() }
+ result.Done = func(balancer.DoneInfo) { p() }
defer func() {
if err != nil {
p()
@@ -297,38 +296,39 @@ func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions)
defer bw.mu.Unlock()
if bw.pickfirst {
// Get the first sc in conns.
- for _, sc := range bw.conns {
- return sc, done, nil
+ for _, result.SubConn = range bw.conns {
+ return result, nil
}
- return nil, nil, balancer.ErrNoSubConnAvailable
+ return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
}
- sc, ok1 := bw.conns[resolver.Address{
+ var ok1 bool
+ result.SubConn, ok1 = bw.conns[resolver.Address{
Addr: a.Addr,
Type: resolver.Backend,
ServerName: "",
Metadata: a.Metadata,
}]
- s, ok2 := bw.connSt[sc]
+ s, ok2 := bw.connSt[result.SubConn]
if !ok1 || !ok2 {
// This can only happen due to a race where Get() returned an address
// that was subsequently removed by Notify. In this case we should
// retry always.
- return nil, nil, balancer.ErrNoSubConnAvailable
+ return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
}
switch s.s {
case connectivity.Ready, connectivity.Idle:
- return sc, done, nil
+ return result, nil
case connectivity.Shutdown, connectivity.TransientFailure:
// If the returned sc has been shut down or is in transient failure,
// return error, and this RPC will fail or wait for another picker (if
// non-failfast).
- return nil, nil, balancer.ErrTransientFailure
+ return balancer.PickResult{}, balancer.ErrTransientFailure
default:
// For other states (connecting or unknown), the v1 balancer would
// traditionally wait until ready and then issue the RPC. Returning
// ErrNoSubConnAvailable will be a slight improvement in that it will
// allow the balancer to choose another address in case others are
// connected.
- return nil, nil, balancer.ErrNoSubConnAvailable
+ return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
}
}
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index a7643df7d..f58740b25 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -31,7 +31,7 @@ import (
"time"
"google.golang.org/grpc/balancer"
- _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin.
+ "google.golang.org/grpc/balancer/base"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials"
@@ -42,10 +42,12 @@ import (
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/resolver"
- _ "google.golang.org/grpc/resolver/dns" // To register dns resolver.
- _ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver.
"google.golang.org/grpc/serviceconfig"
"google.golang.org/grpc/status"
+
+ _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin.
+ _ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver.
+ _ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver.
)
const (
@@ -186,11 +188,11 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
}
if cc.dopts.defaultServiceConfigRawJSON != nil {
- sc, err := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON)
- if err != nil {
- return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, err)
+ scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON)
+ if scpr.Err != nil {
+ return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err)
}
- cc.dopts.defaultServiceConfig = sc
+ cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig)
}
cc.mkp = cc.dopts.copts.KeepaliveParams
@@ -235,29 +237,28 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
}
}
if cc.dopts.bs == nil {
- cc.dopts.bs = backoff.Exponential{
- MaxDelay: DefaultBackoffConfig.MaxDelay,
+ cc.dopts.bs = backoff.DefaultExponential
+ }
+
+ // Determine the resolver to use.
+ cc.parsedTarget = parseTarget(cc.target)
+ grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme)
+ resolverBuilder := cc.getResolver(cc.parsedTarget.Scheme)
+ if resolverBuilder == nil {
+ // If resolver builder is still nil, the parsed target's scheme is
+ // not registered. Fallback to default resolver and set Endpoint to
+ // the original target.
+ grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme)
+ cc.parsedTarget = resolver.Target{
+ Scheme: resolver.GetDefaultScheme(),
+ Endpoint: target,
}
- }
- if cc.dopts.resolverBuilder == nil {
- // Only try to parse target when resolver builder is not already set.
- cc.parsedTarget = parseTarget(cc.target)
- grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme)
- cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme)
- if cc.dopts.resolverBuilder == nil {
- // If resolver builder is still nil, the parsed target's scheme is
- // not registered. Fallback to default resolver and set Endpoint to
- // the original target.
- grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme)
- cc.parsedTarget = resolver.Target{
- Scheme: resolver.GetDefaultScheme(),
- Endpoint: target,
- }
- cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme)
+ resolverBuilder = cc.getResolver(cc.parsedTarget.Scheme)
+ if resolverBuilder == nil {
+ return nil, fmt.Errorf("could not get resolver for default scheme: %q", cc.parsedTarget.Scheme)
}
- } else {
- cc.parsedTarget = resolver.Target{Endpoint: target}
}
+
creds := cc.dopts.copts.TransportCredentials
if creds != nil && creds.Info().ServerName != "" {
cc.authority = creds.Info().ServerName
@@ -297,14 +298,14 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
}
// Build the resolver.
- rWrapper, err := newCCResolverWrapper(cc)
+ rWrapper, err := newCCResolverWrapper(cc, resolverBuilder)
if err != nil {
return nil, fmt.Errorf("failed to build resolver: %v", err)
}
-
cc.mu.Lock()
cc.resolverWrapper = rWrapper
cc.mu.Unlock()
+
// A blocking dial blocks until the clientConn is ready.
if cc.dopts.block {
for {
@@ -443,7 +444,32 @@ func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} {
return csm.notifyChan
}
-// ClientConn represents a client connection to an RPC server.
+// ClientConnInterface defines the functions clients need to perform unary and
+// streaming RPCs. It is implemented by *ClientConn, and is only intended to
+// be referenced by generated code.
+type ClientConnInterface interface {
+ // Invoke performs a unary RPC and returns after the response is received
+ // into reply.
+ Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error
+ // NewStream begins a streaming RPC.
+ NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error)
+}
+
+// Assert *ClientConn implements ClientConnInterface.
+var _ ClientConnInterface = (*ClientConn)(nil)
+
+// ClientConn represents a virtual connection to a conceptual endpoint, to
+// perform RPCs.
+//
+// A ClientConn is free to have zero or more actual connections to the endpoint
+// based on configuration, load, etc. It is also free to determine which actual
+// endpoints to use and may change it every RPC, permitting client-side load
+// balancing.
+//
+// A ClientConn encapsulates a range of functionality including name
+// resolution, TCP connection establishment (with retries and backoff) and TLS
+// handshakes. It also handles errors on established connections by
+// re-resolving the name and reconnecting.
type ClientConn struct {
ctx context.Context
cancel context.CancelFunc
@@ -532,58 +558,104 @@ func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error {
}
}
-func (cc *ClientConn) updateResolverState(s resolver.State) error {
+var emptyServiceConfig *ServiceConfig
+
+func init() {
+ cfg := parseServiceConfig("{}")
+ if cfg.Err != nil {
+ panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err))
+ }
+ emptyServiceConfig = cfg.Config.(*ServiceConfig)
+}
+
+func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) {
+ if cc.sc != nil {
+ cc.applyServiceConfigAndBalancer(cc.sc, addrs)
+ return
+ }
+ if cc.dopts.defaultServiceConfig != nil {
+ cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, addrs)
+ } else {
+ cc.applyServiceConfigAndBalancer(emptyServiceConfig, addrs)
+ }
+}
+
+func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
+ defer cc.firstResolveEvent.Fire()
cc.mu.Lock()
- defer cc.mu.Unlock()
// Check if the ClientConn is already closed. Some fields (e.g.
// balancerWrapper) are set to nil when closing the ClientConn, and could
// cause nil pointer panic if we don't have this check.
if cc.conns == nil {
+ cc.mu.Unlock()
return nil
}
- if cc.dopts.disableServiceConfig || s.ServiceConfig == nil {
- if cc.dopts.defaultServiceConfig != nil && cc.sc == nil {
- cc.applyServiceConfig(cc.dopts.defaultServiceConfig)
+ if err != nil {
+ // May need to apply the initial service config in case the resolver
+ // doesn't support service configs, or doesn't provide a service config
+ // with the new addresses.
+ cc.maybeApplyDefaultServiceConfig(nil)
+
+ if cc.balancerWrapper != nil {
+ cc.balancerWrapper.resolverError(err)
}
- } else if sc, ok := s.ServiceConfig.(*ServiceConfig); ok {
- cc.applyServiceConfig(sc)
+
+ // No addresses are valid with err set; return early.
+ cc.mu.Unlock()
+ return balancer.ErrBadResolverState
}
- var balCfg serviceconfig.LoadBalancingConfig
- if cc.dopts.balancerBuilder == nil {
- // Only look at balancer types and switch balancer if balancer dial
- // option is not set.
- var newBalancerName string
- if cc.sc != nil && cc.sc.lbConfig != nil {
- newBalancerName = cc.sc.lbConfig.name
- balCfg = cc.sc.lbConfig.cfg
+ var ret error
+ if cc.dopts.disableServiceConfig || s.ServiceConfig == nil {
+ cc.maybeApplyDefaultServiceConfig(s.Addresses)
+ // TODO: do we need to apply a failing LB policy if there is no
+ // default, per the error handling design?
+ } else {
+ if sc, ok := s.ServiceConfig.Config.(*ServiceConfig); s.ServiceConfig.Err == nil && ok {
+ cc.applyServiceConfigAndBalancer(sc, s.Addresses)
} else {
- var isGRPCLB bool
- for _, a := range s.Addresses {
- if a.Type == resolver.GRPCLB {
- isGRPCLB = true
- break
+ ret = balancer.ErrBadResolverState
+ if cc.balancerWrapper == nil {
+ var err error
+ if s.ServiceConfig.Err != nil {
+ err = status.Errorf(codes.Unavailable, "error parsing service config: %v", s.ServiceConfig.Err)
+ } else {
+ err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config)
}
- }
- if isGRPCLB {
- newBalancerName = grpclbName
- } else if cc.sc != nil && cc.sc.LB != nil {
- newBalancerName = *cc.sc.LB
- } else {
- newBalancerName = PickFirstBalancerName
+ cc.blockingpicker.updatePicker(base.NewErrPicker(err))
+ cc.csMgr.updateState(connectivity.TransientFailure)
+ cc.mu.Unlock()
+ return ret
}
}
- cc.switchBalancer(newBalancerName)
- } else if cc.balancerWrapper == nil {
- // Balancer dial option was set, and this is the first time handling
- // resolved addresses. Build a balancer with dopts.balancerBuilder.
- cc.curBalancerName = cc.dopts.balancerBuilder.Name()
- cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts)
}
- cc.balancerWrapper.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg})
- return nil
+ var balCfg serviceconfig.LoadBalancingConfig
+ if cc.dopts.balancerBuilder == nil && cc.sc != nil && cc.sc.lbConfig != nil {
+ balCfg = cc.sc.lbConfig.cfg
+ }
+
+ cbn := cc.curBalancerName
+ bw := cc.balancerWrapper
+ cc.mu.Unlock()
+ if cbn != grpclbName {
+ // Filter any grpclb addresses since we don't have the grpclb balancer.
+ for i := 0; i < len(s.Addresses); {
+ if s.Addresses[i].Type == resolver.GRPCLB {
+ copy(s.Addresses[i:], s.Addresses[i+1:])
+ s.Addresses = s.Addresses[:len(s.Addresses)-1]
+ continue
+ }
+ i++
+ }
+ }
+ uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg})
+ if ret == nil {
+	ret = uccsErr // prefer ErrBadResolverState since any other error is
+ // currently meaningless to the caller.
+ }
+ return ret
}
// switchBalancer starts the switching from current balancer to the balancer
@@ -631,7 +703,7 @@ func (cc *ClientConn) switchBalancer(name string) {
cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts)
}
-func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) {
cc.mu.Lock()
if cc.conns == nil {
cc.mu.Unlock()
@@ -639,7 +711,7 @@ func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivi
}
// TODO(bar switching) send updates to all balancer wrappers when balancer
// gracefully switching is supported.
- cc.balancerWrapper.handleSubConnStateChange(sc, s)
+ cc.balancerWrapper.handleSubConnStateChange(sc, s, err)
cc.mu.Unlock()
}
@@ -736,7 +808,7 @@ func (ac *addrConn) connect() error {
}
// Update connectivity state within the lock to prevent subsequent or
// concurrent calls from resetting the transport more than once.
- ac.updateConnectivityState(connectivity.Connecting)
+ ac.updateConnectivityState(connectivity.Connecting, nil)
ac.mu.Unlock()
// Start a goroutine connecting to the server asynchronously.
@@ -822,7 +894,8 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig {
}
func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) {
- t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{
+ t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{
+ Ctx: ctx,
FullMethodName: method,
})
if err != nil {
@@ -831,10 +904,10 @@ func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method st
return t, done, nil
}
-func (cc *ClientConn) applyServiceConfig(sc *ServiceConfig) error {
+func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, addrs []resolver.Address) {
if sc == nil {
// should never reach here.
- return fmt.Errorf("got nil pointer for service config")
+ return
}
cc.sc = sc
@@ -850,10 +923,38 @@ func (cc *ClientConn) applyServiceConfig(sc *ServiceConfig) error {
cc.retryThrottler.Store((*retryThrottler)(nil))
}
- return nil
+ if cc.dopts.balancerBuilder == nil {
+ // Only look at balancer types and switch balancer if balancer dial
+ // option is not set.
+ var newBalancerName string
+ if cc.sc != nil && cc.sc.lbConfig != nil {
+ newBalancerName = cc.sc.lbConfig.name
+ } else {
+ var isGRPCLB bool
+ for _, a := range addrs {
+ if a.Type == resolver.GRPCLB {
+ isGRPCLB = true
+ break
+ }
+ }
+ if isGRPCLB {
+ newBalancerName = grpclbName
+ } else if cc.sc != nil && cc.sc.LB != nil {
+ newBalancerName = *cc.sc.LB
+ } else {
+ newBalancerName = PickFirstBalancerName
+ }
+ }
+ cc.switchBalancer(newBalancerName)
+ } else if cc.balancerWrapper == nil {
+ // Balancer dial option was set, and this is the first time handling
+ // resolved addresses. Build a balancer with dopts.balancerBuilder.
+ cc.curBalancerName = cc.dopts.balancerBuilder.Name()
+ cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts)
+ }
}
-func (cc *ClientConn) resolveNow(o resolver.ResolveNowOption) {
+func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) {
cc.mu.RLock()
r := cc.resolverWrapper
cc.mu.RUnlock()
@@ -875,8 +976,9 @@ func (cc *ClientConn) resolveNow(o resolver.ResolveNowOption) {
// This API is EXPERIMENTAL.
func (cc *ClientConn) ResetConnectBackoff() {
cc.mu.Lock()
- defer cc.mu.Unlock()
- for ac := range cc.conns {
+ conns := cc.conns
+ cc.mu.Unlock()
+ for ac := range conns {
ac.resetConnectBackoff()
}
}
@@ -962,7 +1064,7 @@ type addrConn struct {
}
// Note: this requires a lock on ac.mu.
-func (ac *addrConn) updateConnectivityState(s connectivity.State) {
+func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) {
if ac.state == s {
return
}
@@ -975,7 +1077,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State) {
Severity: channelz.CtINFO,
})
}
- ac.cc.handleSubConnStateChange(ac.acbw, s)
+ ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr)
}
// adjustParams updates parameters used to create transports upon
@@ -995,7 +1097,7 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
func (ac *addrConn) resetTransport() {
for i := 0; ; i++ {
if i > 0 {
- ac.cc.resolveNow(resolver.ResolveNowOption{})
+ ac.cc.resolveNow(resolver.ResolveNowOptions{})
}
ac.mu.Lock()
@@ -1024,7 +1126,7 @@ func (ac *addrConn) resetTransport() {
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm
connectDeadline := time.Now().Add(dialDuration)
- ac.updateConnectivityState(connectivity.Connecting)
+ ac.updateConnectivityState(connectivity.Connecting, nil)
ac.transport = nil
ac.mu.Unlock()
@@ -1037,7 +1139,7 @@ func (ac *addrConn) resetTransport() {
ac.mu.Unlock()
return
}
- ac.updateConnectivityState(connectivity.TransientFailure)
+ ac.updateConnectivityState(connectivity.TransientFailure, err)
// Backoff.
b := ac.resetBackoff
@@ -1093,6 +1195,7 @@ func (ac *addrConn) resetTransport() {
// first successful one. It returns the transport, the address and a Event in
// the successful case. The Event fires when the returned transport disconnects.
func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) (transport.ClientTransport, resolver.Address, *grpcsync.Event, error) {
+ var firstConnErr error
for _, addr := range addrs {
ac.mu.Lock()
if ac.state == connectivity.Shutdown {
@@ -1121,11 +1224,14 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T
if err == nil {
return newTr, addr, reconnect, nil
}
+ if firstConnErr == nil {
+ firstConnErr = err
+ }
ac.cc.blockingpicker.updateConnectionError(err)
}
// Couldn't connect to any address.
- return nil, resolver.Address{}, nil, fmt.Errorf("couldn't connect to any address")
+ return nil, resolver.Address{}, nil, firstConnErr
}
// createTransport creates a connection to addr. It returns the transport and a
@@ -1136,10 +1242,16 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
onCloseCalled := make(chan struct{})
reconnect := grpcsync.NewEvent()
+ authority := ac.cc.authority
+	// addr.ServerName takes precedence over ClientConn authority, if present.
+ if addr.ServerName != "" {
+ authority = addr.ServerName
+ }
+
target := transport.TargetInfo{
Addr: addr.Addr,
Metadata: addr.Metadata,
- Authority: ac.cc.authority,
+ Authority: authority,
}
once := sync.Once{}
@@ -1152,7 +1264,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
// state to Connecting.
//
// TODO: this should be Idle when grpc-go properly supports it.
- ac.updateConnectivityState(connectivity.Connecting)
+ ac.updateConnectivityState(connectivity.Connecting, nil)
}
})
ac.mu.Unlock()
@@ -1167,7 +1279,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
// state to Connecting.
//
// TODO: this should be Idle when grpc-go properly supports it.
- ac.updateConnectivityState(connectivity.Connecting)
+ ac.updateConnectivityState(connectivity.Connecting, nil)
}
})
ac.mu.Unlock()
@@ -1193,7 +1305,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
}
select {
- case <-time.After(connectDeadline.Sub(time.Now())):
+ case <-time.After(time.Until(connectDeadline)):
// We didn't get the preface in time.
newTr.Close()
grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr)
@@ -1224,7 +1336,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) {
var healthcheckManagingState bool
defer func() {
if !healthcheckManagingState {
- ac.updateConnectivityState(connectivity.Ready)
+ ac.updateConnectivityState(connectivity.Ready, nil)
}
}()
@@ -1260,13 +1372,13 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) {
ac.mu.Unlock()
return newNonRetryClientStream(ctx, &StreamDesc{ServerStreams: true}, method, currentTr, ac)
}
- setConnectivityState := func(s connectivity.State) {
+ setConnectivityState := func(s connectivity.State, lastErr error) {
ac.mu.Lock()
defer ac.mu.Unlock()
if ac.transport != currentTr {
return
}
- ac.updateConnectivityState(s)
+ ac.updateConnectivityState(s, lastErr)
}
// Start the health checking stream.
go func() {
@@ -1331,8 +1443,8 @@ func (ac *addrConn) tearDown(err error) {
curTr := ac.transport
ac.transport = nil
// We have to set the state to Shutdown before anything else to prevent races
- // between setting the state and logic that waits on context cancelation / etc.
- ac.updateConnectivityState(connectivity.Shutdown)
+ // between setting the state and logic that waits on context cancellation / etc.
+ ac.updateConnectivityState(connectivity.Shutdown, nil)
ac.cancel()
ac.curAddr = resolver.Address{}
if err == errConnDrain && curTr != nil {
@@ -1355,7 +1467,7 @@ func (ac *addrConn) tearDown(err error) {
},
})
// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
- // the entity beng deleted, and thus prevent it from being deleted right away.
+ // the entity being deleted, and thus prevent it from being deleted right away.
channelz.RemoveEntry(ac.channelzID)
}
ac.mu.Unlock()
@@ -1445,3 +1557,12 @@ func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric {
// Deprecated: This error is never returned by grpc and should not be
// referenced by users.
var ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
+
+func (cc *ClientConn) getResolver(scheme string) resolver.Builder {
+ for _, rb := range cc.dopts.resolvers {
+ if cc.parsedTarget.Scheme == rb.Scheme() {
+ return rb
+ }
+ }
+ return resolver.Get(cc.parsedTarget.Scheme)
+}
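ClientConnInterface is meant to be consumed by generated code; a hypothetical hand-written client shows the shape (service and method names are illustrative):

package example

import (
	"context"

	"google.golang.org/grpc"
)

type greeterClient struct {
	cc grpc.ClientConnInterface
}

func (c *greeterClient) SayHello(ctx context.Context, in, out interface{}, opts ...grpc.CallOption) error {
	// Invoke performs the unary RPC over whatever implements the interface,
	// typically a *grpc.ClientConn.
	return c.cc.Invoke(ctx, "/helloworld.Greeter/SayHello", in, out, opts...)
}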
diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go
index 8ea3d4a1d..845ce5d21 100644
--- a/vendor/google.golang.org/grpc/credentials/credentials.go
+++ b/vendor/google.golang.org/grpc/credentials/credentials.go
@@ -24,16 +24,12 @@ package credentials // import "google.golang.org/grpc/credentials"
import (
"context"
- "crypto/tls"
- "crypto/x509"
"errors"
"fmt"
- "io/ioutil"
"net"
- "strings"
"github.com/golang/protobuf/proto"
- "google.golang.org/grpc/credentials/internal"
+ "google.golang.org/grpc/internal"
)
// PerRPCCredentials defines the common interface for the credentials which need to
@@ -45,7 +41,8 @@ type PerRPCCredentials interface {
// context. If a status code is returned, it will be used as the status
// for the RPC. uri is the URI of the entry point for the request.
// When supported by the underlying implementation, ctx can be used for
- // timeout and cancellation.
+ // timeout and cancellation. Additionally, RequestInfo data will be
+ // available via ctx to this call.
// TODO(zhaoq): Define the set of the qualified keys instead of leaving
// it as an arbitrary string.
GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
@@ -54,6 +51,48 @@ type PerRPCCredentials interface {
RequireTransportSecurity() bool
}
+// SecurityLevel defines the protection level on an established connection.
+//
+// This API is experimental.
+type SecurityLevel int
+
+const (
+ // NoSecurity indicates a connection is insecure.
+ // The zero SecurityLevel value is invalid for backward compatibility.
+ NoSecurity SecurityLevel = iota + 1
+ // IntegrityOnly indicates a connection only provides integrity protection.
+ IntegrityOnly
+ // PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection.
+ PrivacyAndIntegrity
+)
+
+// String returns SecurityLevel in a string format.
+func (s SecurityLevel) String() string {
+ switch s {
+ case NoSecurity:
+ return "NoSecurity"
+ case IntegrityOnly:
+ return "IntegrityOnly"
+ case PrivacyAndIntegrity:
+ return "PrivacyAndIntegrity"
+ }
+ return fmt.Sprintf("invalid SecurityLevel: %v", int(s))
+}
+
+// CommonAuthInfo contains authenticated information common to AuthInfo implementations.
+// It should be embedded in a struct implementing AuthInfo to provide additional information
+// about the credentials.
+//
+// This API is experimental.
+type CommonAuthInfo struct {
+ SecurityLevel SecurityLevel
+}
+
+// GetCommonAuthInfo returns the pointer to CommonAuthInfo struct.
+func (c *CommonAuthInfo) GetCommonAuthInfo() *CommonAuthInfo {
+ return c
+}
+
// ProtocolInfo provides information regarding the gRPC wire protocol version,
// security protocol, security protocol version in use, server name, etc.
type ProtocolInfo struct {
@@ -68,6 +107,8 @@ type ProtocolInfo struct {
}
// AuthInfo defines the common interface for the auth information the users are interested in.
+// A struct that implements AuthInfo should embed CommonAuthInfo by including additional
+// information about the credentials in it.
type AuthInfo interface {
AuthType() string
}
@@ -82,7 +123,8 @@ type TransportCredentials interface {
// ClientHandshake does the authentication handshake specified by the corresponding
// authentication protocol on rawConn for clients. It returns the authenticated
// connection and the corresponding auth information about the connection.
- // Implementations must use the provided context to implement timely cancellation.
+ // The auth information should embed CommonAuthInfo to return additional information about
+ // the credentials. Implementations must use the provided context to implement timely cancellation.
// gRPC will try to reconnect if the error returned is a temporary error
// (io.EOF, context.DeadlineExceeded or err.Temporary() == true).
// If the returned error is a wrapper error, implementations should make sure that
@@ -92,7 +134,8 @@ type TransportCredentials interface {
ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error)
// ServerHandshake does the authentication handshake for servers. It returns
// the authenticated connection and the corresponding auth information about
- // the connection.
+ // the connection. The auth information should embed CommonAuthInfo to return additional information
+ // about the credentials.
//
// If the returned net.Conn is closed, it MUST close the net.Conn provided.
ServerHandshake(net.Conn) (net.Conn, AuthInfo, error)
@@ -125,145 +168,63 @@ type Bundle interface {
NewWithMode(mode string) (Bundle, error)
}
-// TLSInfo contains the auth information for a TLS authenticated connection.
-// It implements the AuthInfo interface.
-type TLSInfo struct {
- State tls.ConnectionState
-}
-
-// AuthType returns the type of TLSInfo as a string.
-func (t TLSInfo) AuthType() string {
- return "tls"
+// RequestInfo contains request data attached to the context passed to GetRequestMetadata calls.
+//
+// This API is experimental.
+type RequestInfo struct {
+ // The method passed to Invoke or NewStream for this RPC. (For proto methods, this has the format "/some.Service/Method")
+ Method string
+ // AuthInfo contains the information from a security handshake (TransportCredentials.ClientHandshake, TransportCredentials.ServerHandshake)
+ AuthInfo AuthInfo
}
-// GetSecurityValue returns security info requested by channelz.
-func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue {
- v := &TLSChannelzSecurityValue{
- StandardName: cipherSuiteLookup[t.State.CipherSuite],
- }
- // Currently there's no way to get LocalCertificate info from tls package.
- if len(t.State.PeerCertificates) > 0 {
- v.RemoteCertificate = t.State.PeerCertificates[0].Raw
- }
- return v
-}
+// requestInfoKey is a struct to be used as the key when attaching a RequestInfo to a context object.
+type requestInfoKey struct{}
-// tlsCreds is the credentials required for authenticating a connection using TLS.
-type tlsCreds struct {
- // TLS configuration
- config *tls.Config
+// RequestInfoFromContext extracts the RequestInfo from the context if it exists.
+//
+// This API is experimental.
+func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) {
+ ri, ok = ctx.Value(requestInfoKey{}).(RequestInfo)
+ return
}
-func (c tlsCreds) Info() ProtocolInfo {
- return ProtocolInfo{
- SecurityProtocol: "tls",
- SecurityVersion: "1.2",
- ServerName: c.config.ServerName,
+// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one.
+// It returns success if 1) the condition is satisified or 2) AuthInfo struct does not implement GetCommonAuthInfo() method
+// or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility.
+//
+// This API is experimental.
+func CheckSecurityLevel(ctx context.Context, level SecurityLevel) error {
+ type internalInfo interface {
+ GetCommonAuthInfo() *CommonAuthInfo
}
-}
-
-func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
- // use local cfg to avoid clobbering ServerName if using multiple endpoints
- cfg := cloneTLSConfig(c.config)
- if cfg.ServerName == "" {
- colonPos := strings.LastIndex(authority, ":")
- if colonPos == -1 {
- colonPos = len(authority)
- }
- cfg.ServerName = authority[:colonPos]
+ ri, _ := RequestInfoFromContext(ctx)
+ if ri.AuthInfo == nil {
+ return errors.New("unable to obtain SecurityLevel from context")
}
- conn := tls.Client(rawConn, cfg)
- errChannel := make(chan error, 1)
- go func() {
- errChannel <- conn.Handshake()
- }()
- select {
- case err := <-errChannel:
- if err != nil {
- return nil, nil, err
+ if ci, ok := ri.AuthInfo.(internalInfo); ok {
+ // CommonAuthInfo.SecurityLevel has an invalid value.
+ if ci.GetCommonAuthInfo().SecurityLevel == 0 {
+ return nil
}
- case <-ctx.Done():
- return nil, nil, ctx.Err()
- }
- return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil
-}
-
-func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
- conn := tls.Server(rawConn, c.config)
- if err := conn.Handshake(); err != nil {
- return nil, nil, err
- }
- return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil
-}
-
-func (c *tlsCreds) Clone() TransportCredentials {
- return NewTLS(c.config)
-}
-
-func (c *tlsCreds) OverrideServerName(serverNameOverride string) error {
- c.config.ServerName = serverNameOverride
- return nil
-}
-
-const alpnProtoStrH2 = "h2"
-
-func appendH2ToNextProtos(ps []string) []string {
- for _, p := range ps {
- if p == alpnProtoStrH2 {
- return ps
+ if ci.GetCommonAuthInfo().SecurityLevel < level {
+ return fmt.Errorf("requires SecurityLevel %v; connection has %v", level, ci.GetCommonAuthInfo().SecurityLevel)
}
}
- ret := make([]string, 0, len(ps)+1)
- ret = append(ret, ps...)
- return append(ret, alpnProtoStrH2)
-}
-
-// NewTLS uses c to construct a TransportCredentials based on TLS.
-func NewTLS(c *tls.Config) TransportCredentials {
- tc := &tlsCreds{cloneTLSConfig(c)}
- tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos)
- return tc
-}
-
-// NewClientTLSFromCert constructs TLS credentials from the input certificate for client.
-// serverNameOverride is for testing only. If set to a non empty string,
-// it will override the virtual host name of authority (e.g. :authority header field) in requests.
-func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials {
- return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp})
-}
-
-// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client.
-// serverNameOverride is for testing only. If set to a non empty string,
-// it will override the virtual host name of authority (e.g. :authority header field) in requests.
-func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
- b, err := ioutil.ReadFile(certFile)
- if err != nil {
- return nil, err
- }
- cp := x509.NewCertPool()
- if !cp.AppendCertsFromPEM(b) {
- return nil, fmt.Errorf("credentials: failed to append certificates")
- }
- return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil
-}
-
-// NewServerTLSFromCert constructs TLS credentials from the input certificate for server.
-func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials {
- return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})
+ // The condition is satisfied or AuthInfo struct does not implement GetCommonAuthInfo() method.
+ return nil
}
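
Another hedged sketch: per-RPC token credentials can call CheckSecurityLevel before attaching secrets, so a token is never sent over a connection that does not offer privacy and integrity; bearerCreds is an illustrative name.

package tokencreds

import (
	"context"

	"google.golang.org/grpc/credentials"
)

// bearerCreds attaches a static bearer token to outgoing RPCs, but only when
// the transport reports at least PrivacyAndIntegrity.
type bearerCreds struct{ token string }

func (b bearerCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	// gRPC is expected to attach RequestInfo (and thus the handshake's AuthInfo)
	// to this context, which is what CheckSecurityLevel inspects. Credentials
	// that predate CommonAuthInfo pass the check for backward compatibility.
	if err := credentials.CheckSecurityLevel(ctx, credentials.PrivacyAndIntegrity); err != nil {
		return nil, err
	}
	return map[string]string{"authorization": "Bearer " + b.token}, nil
}

func (b bearerCreds) RequireTransportSecurity() bool { return true }
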
-// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key
-// file for server.
-func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) {
- cert, err := tls.LoadX509KeyPair(certFile, keyFile)
- if err != nil {
- return nil, err
+func init() {
+ internal.NewRequestInfoContext = func(ctx context.Context, ri RequestInfo) context.Context {
+ return context.WithValue(ctx, requestInfoKey{}, ri)
}
- return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
}
// ChannelzSecurityInfo defines the interface that security protocols should implement
// in order to provide security info to channelz.
+//
+// This API is experimental.
type ChannelzSecurityInfo interface {
GetSecurityValue() ChannelzSecurityValue
}
@@ -271,66 +232,20 @@ type ChannelzSecurityInfo interface {
// ChannelzSecurityValue defines the interface that GetSecurityValue() return value
// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue
// and *OtherChannelzSecurityValue.
+//
+// This API is experimental.
type ChannelzSecurityValue interface {
isChannelzSecurityValue()
}
-// TLSChannelzSecurityValue defines the struct that TLS protocol should return
-// from GetSecurityValue(), containing security info like cipher and certificate used.
-type TLSChannelzSecurityValue struct {
- ChannelzSecurityValue
- StandardName string
- LocalCertificate []byte
- RemoteCertificate []byte
-}
-
// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return
// from GetSecurityValue(), which contains protocol specific security info. Note
// the Value field will be sent to users of channelz requesting channel info, and
// thus sensitive info should better be avoided.
+//
+// This API is experimental.
type OtherChannelzSecurityValue struct {
ChannelzSecurityValue
Name string
Value proto.Message
}
-
-var cipherSuiteLookup = map[uint16]string{
- tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA",
- tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
- tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA",
- tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA",
- tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256",
- tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384",
- tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
- tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA",
- tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
- tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
- tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
- tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
- tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
- tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV",
- tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256",
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
- tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
- tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
- tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
-}
-
-// cloneTLSConfig returns a shallow clone of the exported
-// fields of cfg, ignoring the unexported sync.Once, which
-// contains a mutex and must not be copied.
-//
-// If cfg is nil, a new zero tls.Config is returned.
-//
-// TODO: inline this function if possible.
-func cloneTLSConfig(cfg *tls.Config) *tls.Config {
- if cfg == nil {
- return &tls.Config{}
- }
-
- return cfg.Clone()
-}
diff --git a/vendor/google.golang.org/grpc/credentials/tls13.go b/vendor/google.golang.org/grpc/credentials/go12.go
similarity index 100%
rename from vendor/google.golang.org/grpc/credentials/tls13.go
rename to vendor/google.golang.org/grpc/credentials/go12.go
diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go
new file mode 100644
index 000000000..28b4f6232
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/tls.go
@@ -0,0 +1,225 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package credentials
+
+import (
+ "context"
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io/ioutil"
+ "net"
+
+ "google.golang.org/grpc/credentials/internal"
+)
+
+// TLSInfo contains the auth information for a TLS authenticated connection.
+// It implements the AuthInfo interface.
+type TLSInfo struct {
+ State tls.ConnectionState
+ CommonAuthInfo
+}
+
+// AuthType returns the type of TLSInfo as a string.
+func (t TLSInfo) AuthType() string {
+ return "tls"
+}
+
+// GetSecurityValue returns security info requested by channelz.
+func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue {
+ v := &TLSChannelzSecurityValue{
+ StandardName: cipherSuiteLookup[t.State.CipherSuite],
+ }
+ // Currently there's no way to get LocalCertificate info from tls package.
+ if len(t.State.PeerCertificates) > 0 {
+ v.RemoteCertificate = t.State.PeerCertificates[0].Raw
+ }
+ return v
+}
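
A hedged server-side sketch of how a handler might inspect this TLSInfo (including the newly embedded CommonAuthInfo) through the peer package; the function name is illustrative.

package tlsinspect

import (
	"context"
	"fmt"

	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/peer"
)

// describeTLS summarizes the TLS state of the connection carrying this RPC.
func describeTLS(ctx context.Context) (string, error) {
	p, ok := peer.FromContext(ctx)
	if !ok {
		return "", fmt.Errorf("no peer in context")
	}
	tlsInfo, ok := p.AuthInfo.(credentials.TLSInfo)
	if !ok {
		return "", fmt.Errorf("connection is not TLS (auth info: %T)", p.AuthInfo)
	}
	subject := "none"
	if certs := tlsInfo.State.PeerCertificates; len(certs) > 0 {
		subject = certs[0].Subject.String()
	}
	// The embedded CommonAuthInfo reports PrivacyAndIntegrity for TLS handshakes.
	return fmt.Sprintf("peer cert subject=%s security level=%v", subject, tlsInfo.CommonAuthInfo.SecurityLevel), nil
}
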
+
+// tlsCreds is the credentials required for authenticating a connection using TLS.
+type tlsCreds struct {
+ // TLS configuration
+ config *tls.Config
+}
+
+func (c tlsCreds) Info() ProtocolInfo {
+ return ProtocolInfo{
+ SecurityProtocol: "tls",
+ SecurityVersion: "1.2",
+ ServerName: c.config.ServerName,
+ }
+}
+
+func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
+ // use local cfg to avoid clobbering ServerName if using multiple endpoints
+ cfg := cloneTLSConfig(c.config)
+ if cfg.ServerName == "" {
+ serverName, _, err := net.SplitHostPort(authority)
+ if err != nil {
+ // If the authority had no host port or if the authority cannot be parsed, use it as-is.
+ serverName = authority
+ }
+ cfg.ServerName = serverName
+ }
+ conn := tls.Client(rawConn, cfg)
+ errChannel := make(chan error, 1)
+ go func() {
+ errChannel <- conn.Handshake()
+ close(errChannel)
+ }()
+ select {
+ case err := <-errChannel:
+ if err != nil {
+ conn.Close()
+ return nil, nil, err
+ }
+ case <-ctx.Done():
+ conn.Close()
+ return nil, nil, ctx.Err()
+ }
+ return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState(), CommonAuthInfo{PrivacyAndIntegrity}}, nil
+}
+
+func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
+ conn := tls.Server(rawConn, c.config)
+ if err := conn.Handshake(); err != nil {
+ conn.Close()
+ return nil, nil, err
+ }
+ return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState(), CommonAuthInfo{PrivacyAndIntegrity}}, nil
+}
+
+func (c *tlsCreds) Clone() TransportCredentials {
+ return NewTLS(c.config)
+}
+
+func (c *tlsCreds) OverrideServerName(serverNameOverride string) error {
+ c.config.ServerName = serverNameOverride
+ return nil
+}
+
+const alpnProtoStrH2 = "h2"
+
+func appendH2ToNextProtos(ps []string) []string {
+ for _, p := range ps {
+ if p == alpnProtoStrH2 {
+ return ps
+ }
+ }
+ ret := make([]string, 0, len(ps)+1)
+ ret = append(ret, ps...)
+ return append(ret, alpnProtoStrH2)
+}
+
+// NewTLS uses c to construct a TransportCredentials based on TLS.
+func NewTLS(c *tls.Config) TransportCredentials {
+ tc := &tlsCreds{cloneTLSConfig(c)}
+ tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos)
+ return tc
+}
+
+// NewClientTLSFromCert constructs TLS credentials from the input certificate for client.
+// serverNameOverride is for testing only. If set to a non empty string,
+// it will override the virtual host name of authority (e.g. :authority header field) in requests.
+func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials {
+ return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp})
+}
+
+// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client.
+// serverNameOverride is for testing only. If set to a non empty string,
+// it will override the virtual host name of authority (e.g. :authority header field) in requests.
+func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
+ b, err := ioutil.ReadFile(certFile)
+ if err != nil {
+ return nil, err
+ }
+ cp := x509.NewCertPool()
+ if !cp.AppendCertsFromPEM(b) {
+ return nil, fmt.Errorf("credentials: failed to append certificates")
+ }
+ return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil
+}
+
+// NewServerTLSFromCert constructs TLS credentials from the input certificate for server.
+func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials {
+ return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})
+}
+
+// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key
+// file for server.
+func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) {
+ cert, err := tls.LoadX509KeyPair(certFile, keyFile)
+ if err != nil {
+ return nil, err
+ }
+ return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
+}
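
A minimal end-to-end sketch of these constructors, assuming placeholder certificate paths (server.crt, server.key, ca.crt) and a local listen address:

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// Server side: serve with a certificate/key pair loaded from disk.
	serverCreds, err := credentials.NewServerTLSFromFile("server.crt", "server.key")
	if err != nil {
		log.Fatalf("loading server TLS credentials: %v", err)
	}
	lis, err := net.Listen("tcp", "localhost:50051")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	srv := grpc.NewServer(grpc.Creds(serverCreds))
	defer srv.Stop()
	go srv.Serve(lis)

	// Client side: trust the CA bundle in ca.crt; the empty override keeps the
	// server name derived from the dial target.
	clientCreds, err := credentials.NewClientTLSFromFile("ca.crt", "")
	if err != nil {
		log.Fatalf("loading client TLS credentials: %v", err)
	}
	conn, err := grpc.Dial("localhost:50051", grpc.WithTransportCredentials(clientCreds))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
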
+
+// TLSChannelzSecurityValue defines the struct that TLS protocol should return
+// from GetSecurityValue(), containing security info like cipher and certificate used.
+//
+// This API is EXPERIMENTAL.
+type TLSChannelzSecurityValue struct {
+ ChannelzSecurityValue
+ StandardName string
+ LocalCertificate []byte
+ RemoteCertificate []byte
+}
+
+var cipherSuiteLookup = map[uint16]string{
+ tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA",
+ tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
+ tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA",
+ tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA",
+ tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256",
+ tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384",
+ tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
+ tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
+ tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA",
+ tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
+ tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
+ tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
+ tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
+ tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+ tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
+ tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV",
+ tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256",
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
+ tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
+ tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
+ tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
+}
+
+// cloneTLSConfig returns a shallow clone of the exported
+// fields of cfg, ignoring the unexported sync.Once, which
+// contains a mutex and must not be copied.
+//
+// If cfg is nil, a new zero tls.Config is returned.
+//
+// TODO: inline this function if possible.
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+ if cfg == nil {
+ return &tls.Config{}
+ }
+
+ return cfg.Clone()
+}
diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
index e8f34d0d6..63f5ae21d 100644
--- a/vendor/google.golang.org/grpc/dialoptions.go
+++ b/vendor/google.golang.org/grpc/dialoptions.go
@@ -24,11 +24,12 @@ import (
"net"
"time"
+ "google.golang.org/grpc/backoff"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal"
- "google.golang.org/grpc/internal/backoff"
+ internalbackoff "google.golang.org/grpc/internal/backoff"
"google.golang.org/grpc/internal/envconfig"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
@@ -47,7 +48,7 @@ type dialOptions struct {
cp Compressor
dc Decompressor
- bs backoff.Strategy
+ bs internalbackoff.Strategy
block bool
insecure bool
timeout time.Duration
@@ -57,9 +58,7 @@ type dialOptions struct {
callOptions []CallOption
// This is used by v1 balancer dial option WithBalancer to support v1
// balancer, and also by WithBalancerName dial option.
- balancerBuilder balancer.Builder
- // This is to support grpclb.
- resolverBuilder resolver.Builder
+ balancerBuilder balancer.Builder
channelzParentID int64
disableServiceConfig bool
disableRetry bool
@@ -68,6 +67,11 @@ type dialOptions struct {
minConnectTimeout func() time.Duration
defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON.
defaultServiceConfigRawJSON *string
+ // This is used by ccResolverWrapper to backoff between successive calls to
+ // resolver.ResolveNow(). The user will have no need to configure this, but
+ // we need to be able to configure this in tests.
+ resolveNowBackoff func(int) time.Duration
+ resolvers []resolver.Builder
}
// DialOption configures how we set up the connection.
@@ -226,13 +230,6 @@ func WithBalancerName(balancerName string) DialOption {
})
}
-// withResolverBuilder is only for grpclb.
-func withResolverBuilder(b resolver.Builder) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.resolverBuilder = b
- })
-}
-
// WithServiceConfig returns a DialOption which has a channel to read the
// service configuration.
//
@@ -246,8 +243,28 @@ func WithServiceConfig(c <-chan ServiceConfig) DialOption {
})
}
+// WithConnectParams configures the dialer to use the provided ConnectParams.
+//
+// The backoff configuration specified as part of the ConnectParams overrides
+// all defaults specified in
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider
+// using the backoff.DefaultConfig as a base, in cases where you want to
+// override only a subset of the backoff configuration.
+//
+// This API is EXPERIMENTAL.
+func WithConnectParams(p ConnectParams) DialOption {
+ return newFuncDialOption(func(o *dialOptions) {
+ o.bs = internalbackoff.Exponential{Config: p.Backoff}
+ o.minConnectTimeout = func() time.Duration {
+ return p.MinConnectTimeout
+ }
+ })
+}
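
A short usage sketch: start from backoff.DefaultConfig, as the comment above suggests, and override only the fields you care about (the address and durations here are placeholders).

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
)

func main() {
	// Cap reconnect backoff at 30s while keeping the documented defaults for
	// base delay, multiplier and jitter.
	bc := backoff.DefaultConfig
	bc.MaxDelay = 30 * time.Second

	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff:           bc,
			MinConnectTimeout: 10 * time.Second,
		}),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
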
+
// WithBackoffMaxDelay configures the dialer to use the provided maximum delay
// when backing off after failed connection attempts.
+//
+// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x.
func WithBackoffMaxDelay(md time.Duration) DialOption {
return WithBackoffConfig(BackoffConfig{MaxDelay: md})
}
@@ -255,19 +272,18 @@ func WithBackoffMaxDelay(md time.Duration) DialOption {
// WithBackoffConfig configures the dialer to use the provided backoff
// parameters after connection failures.
//
-// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up
-// for use.
+// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x.
func WithBackoffConfig(b BackoffConfig) DialOption {
- return withBackoff(backoff.Exponential{
- MaxDelay: b.MaxDelay,
- })
+ bc := backoff.DefaultConfig
+ bc.MaxDelay = b.MaxDelay
+ return withBackoff(internalbackoff.Exponential{Config: bc})
}
// withBackoff sets the backoff strategy used for connectRetryNum after a failed
// connection attempt.
//
// This can be exported if arbitrary backoff strategies are allowed by gRPC.
-func withBackoff(bs backoff.Strategy) DialOption {
+func withBackoff(bs internalbackoff.Strategy) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.bs = bs
})
@@ -322,8 +338,8 @@ func WithCredentialsBundle(b credentials.Bundle) DialOption {
// WithTimeout returns a DialOption that configures a timeout for dialing a
// ClientConn initially. This is valid if and only if WithBlock() is present.
//
-// Deprecated: use DialContext and context.WithTimeout instead. Will be
-// supported throughout 1.x.
+// Deprecated: use DialContext together with context.WithTimeout instead of
+// Dial with WithTimeout. Will be supported throughout 1.x.
func WithTimeout(d time.Duration) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.timeout = d
@@ -341,7 +357,6 @@ func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOp
}
func init() {
- internal.WithResolverBuilder = withResolverBuilder
internal.WithHealthCheckFunc = withHealthCheckFunc
}
@@ -455,6 +470,8 @@ func WithAuthority(a string) DialOption {
// WithChannelzParentID returns a DialOption that specifies the channelz ID of
// current ClientConn's parent. This function is used in nested channel creation
// (e.g. grpclb dial).
+//
+// This API is EXPERIMENTAL.
func WithChannelzParentID(id int64) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.channelzParentID = id
@@ -539,6 +556,7 @@ func defaultDialOptions() dialOptions {
WriteBufferSize: defaultWriteBufSize,
ReadBufferSize: defaultReadBufSize,
},
+ resolveNowBackoff: internalbackoff.DefaultExponential.Backoff,
}
}
@@ -552,3 +570,25 @@ func withMinConnectDeadline(f func() time.Duration) DialOption {
o.minConnectTimeout = f
})
}
+
+// withResolveNowBackoff specifies the function that clientconn uses to backoff
+// between successive calls to resolver.ResolveNow().
+//
+// For testing purposes only.
+func withResolveNowBackoff(f func(int) time.Duration) DialOption {
+ return newFuncDialOption(func(o *dialOptions) {
+ o.resolveNowBackoff = f
+ })
+}
+
+// WithResolvers allows a list of resolver implementations to be registered
+// locally with the ClientConn without needing to be globally registered via
+// resolver.Register. They will be matched against the scheme used for the
+// current Dial only, and will take precedence over the global registry.
+//
+// This API is EXPERIMENTAL.
+func WithResolvers(rs ...resolver.Builder) DialOption {
+ return newFuncDialOption(func(o *dialOptions) {
+ o.resolvers = append(o.resolvers, rs...)
+ })
+}
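
A hedged sketch of WithResolvers: a toy builder for a hypothetical "static" scheme is registered for a single Dial, shadowing anything in the global registry.

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/resolver"
)

// staticBuilder resolves the illustrative "static" scheme to a fixed address list.
type staticBuilder struct{ addrs []resolver.Address }

func (b *staticBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
	// Push the fixed address list once; nothing ever changes afterwards.
	cc.UpdateState(resolver.State{Addresses: b.addrs})
	return staticResolver{}, nil
}

func (b *staticBuilder) Scheme() string { return "static" }

// staticResolver never re-resolves.
type staticResolver struct{}

func (staticResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (staticResolver) Close()                                {}

func main() {
	b := &staticBuilder{addrs: []resolver.Address{{Addr: "localhost:50051"}}}
	conn, err := grpc.Dial("static:///ignored", grpc.WithInsecure(), grpc.WithResolvers(b))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
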
diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go
index 30a75da99..195e8448b 100644
--- a/vendor/google.golang.org/grpc/encoding/encoding.go
+++ b/vendor/google.golang.org/grpc/encoding/encoding.go
@@ -46,6 +46,10 @@ type Compressor interface {
// coding header. The result must be static; the result cannot change
// between calls.
Name() string
+ // EXPERIMENTAL: if a Compressor implements
+ // DecompressedSize(compressedBytes []byte) int, gRPC will call it
+ // to determine the size of the buffer allocated for the result of decompression.
+ // Return -1 to indicate unknown size.
}
var registeredCompressor = make(map[string]Compressor)
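
A hedged sketch of the optional DecompressedSize hook described above, wrapping an existing Compressor; it relies on the gzip format storing the uncompressed length (mod 2^32) in the final four bytes of the stream.

package sizedgzip

import (
	"encoding/binary"

	"google.golang.org/grpc/encoding"
)

// sizedCompressor wraps another encoding.Compressor and adds the optional
// DecompressedSize method so gRPC can pre-size its decompression buffer.
type sizedCompressor struct {
	encoding.Compressor // assumed to be a gzip-based compressor
}

// DecompressedSize returns the uncompressed length recorded in the gzip
// trailer, or -1 if the payload is too short to carry one.
func (sizedCompressor) DecompressedSize(compressedBytes []byte) int {
	if len(compressedBytes) < 4 {
		return -1
	}
	return int(binary.LittleEndian.Uint32(compressedBytes[len(compressedBytes)-4:]))
}
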
diff --git a/vendor/google.golang.org/grpc/go.mod b/vendor/google.golang.org/grpc/go.mod
index c1a8340c5..237836130 100644
--- a/vendor/google.golang.org/grpc/go.mod
+++ b/vendor/google.golang.org/grpc/go.mod
@@ -1,19 +1,16 @@
module google.golang.org/grpc
+go 1.11
+
require (
- cloud.google.com/go v0.26.0 // indirect
- github.com/BurntSushi/toml v0.3.1 // indirect
- github.com/client9/misspell v0.3.4
+ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473
+ github.com/envoyproxy/protoc-gen-validate v0.1.0
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
github.com/golang/mock v1.1.1
- github.com/golang/protobuf v1.2.0
+ github.com/golang/protobuf v1.3.2
github.com/google/go-cmp v0.2.0
- golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3
golang.org/x/net v0.0.0-20190311183353-d8887717615a
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a
- golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135
- google.golang.org/appengine v1.1.0 // indirect
- google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8
- honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc
+ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55
)
diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go
index 51bb9457c..874ea6d98 100644
--- a/vendor/google.golang.org/grpc/grpclog/grpclog.go
+++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go
@@ -89,7 +89,7 @@ func Fatal(args ...interface{}) {
}
// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
-// It calles os.Exit() with exit code 1.
+// It calls os.Exit() with exit code 1.
func Fatalf(format string, args ...interface{}) {
logger.Fatalf(format, args...)
// Make sure fatal logs will exit.
diff --git a/vendor/google.golang.org/grpc/health/client.go b/vendor/google.golang.org/grpc/health/client.go
index b43746e61..b5bee4838 100644
--- a/vendor/google.golang.org/grpc/health/client.go
+++ b/vendor/google.golang.org/grpc/health/client.go
@@ -33,20 +33,20 @@ import (
"google.golang.org/grpc/status"
)
-const maxDelay = 120 * time.Second
-
-var backoffStrategy = backoff.Exponential{MaxDelay: maxDelay}
-var backoffFunc = func(ctx context.Context, retries int) bool {
- d := backoffStrategy.Backoff(retries)
- timer := time.NewTimer(d)
- select {
- case <-timer.C:
- return true
- case <-ctx.Done():
- timer.Stop()
- return false
+var (
+ backoffStrategy = backoff.DefaultExponential
+ backoffFunc = func(ctx context.Context, retries int) bool {
+ d := backoffStrategy.Backoff(retries)
+ timer := time.NewTimer(d)
+ select {
+ case <-timer.C:
+ return true
+ case <-ctx.Done():
+ timer.Stop()
+ return false
+ }
}
-}
+)
func init() {
internal.HealthCheckFunc = clientHealthCheck
@@ -56,7 +56,7 @@ const healthCheckMethod = "/grpc.health.v1.Health/Watch"
// This function implements the protocol defined at:
// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
-func clientHealthCheck(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State), service string) error {
+func clientHealthCheck(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), service string) error {
tryCnt := 0
retryConnection:
@@ -70,7 +70,7 @@ retryConnection:
if ctx.Err() != nil {
return nil
}
- setConnectivityState(connectivity.Connecting)
+ setConnectivityState(connectivity.Connecting, nil)
rawS, err := newStream(healthCheckMethod)
if err != nil {
continue retryConnection
@@ -79,7 +79,7 @@ retryConnection:
s, ok := rawS.(grpc.ClientStream)
// Ideally, this should never happen. But if it happens, the server is marked as healthy for LBing purposes.
if !ok {
- setConnectivityState(connectivity.Ready)
+ setConnectivityState(connectivity.Ready, nil)
return fmt.Errorf("newStream returned %v (type %T); want grpc.ClientStream", rawS, rawS)
}
@@ -95,22 +95,22 @@ retryConnection:
// Reports healthy for the LBing purposes if health check is not implemented in the server.
if status.Code(err) == codes.Unimplemented {
- setConnectivityState(connectivity.Ready)
+ setConnectivityState(connectivity.Ready, nil)
return err
}
// Reports unhealthy if server's Watch method gives an error other than UNIMPLEMENTED.
if err != nil {
- setConnectivityState(connectivity.TransientFailure)
+ setConnectivityState(connectivity.TransientFailure, fmt.Errorf("connection active but received health check RPC error: %v", err))
continue retryConnection
}
- // As a message has been received, removes the need for backoff for the next retry by reseting the try count.
+ // As a message has been received, removes the need for backoff for the next retry by resetting the try count.
tryCnt = 0
if resp.Status == healthpb.HealthCheckResponse_SERVING {
- setConnectivityState(connectivity.Ready)
+ setConnectivityState(connectivity.Ready, nil)
} else {
- setConnectivityState(connectivity.TransientFailure)
+ setConnectivityState(connectivity.TransientFailure, fmt.Errorf("connection active but health check failed. status=%s", resp.Status))
}
}
}
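
Client-side health checking as implemented above is opt-in; a hedged sketch of enabling it through service config follows (the address is a placeholder, and a balancer such as round_robin is needed because pick_first does not use the health checker).

package main

import (
	"log"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/health" // registers clientHealthCheck via init()
)

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		// An empty serviceName asks about the server's overall health.
		grpc.WithDefaultServiceConfig(`{
			"loadBalancingConfig": [{"round_robin":{}}],
			"healthCheckConfig": {"serviceName": ""}
		}`),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
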
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
index c2f2c7729..c99e27ae5 100644
--- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
+++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
@@ -1,15 +1,16 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: grpc/health/v1/health.proto
-package grpc_health_v1 // import "google.golang.org/grpc/health/grpc_health_v1"
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
+package grpc_health_v1
import (
- context "golang.org/x/net/context"
+ context "context"
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
@@ -21,7 +22,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type HealthCheckResponse_ServingStatus int32
@@ -38,6 +39,7 @@ var HealthCheckResponse_ServingStatus_name = map[int32]string{
2: "NOT_SERVING",
3: "SERVICE_UNKNOWN",
}
+
var HealthCheckResponse_ServingStatus_value = map[string]int32{
"UNKNOWN": 0,
"SERVING": 1,
@@ -48,8 +50,9 @@ var HealthCheckResponse_ServingStatus_value = map[string]int32{
func (x HealthCheckResponse_ServingStatus) String() string {
return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x))
}
+
func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_health_6b1a06aa67f91efd, []int{1, 0}
+ return fileDescriptor_e265fd9d4e077217, []int{1, 0}
}
type HealthCheckRequest struct {
@@ -63,16 +66,17 @@ func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} }
func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) }
func (*HealthCheckRequest) ProtoMessage() {}
func (*HealthCheckRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_health_6b1a06aa67f91efd, []int{0}
+ return fileDescriptor_e265fd9d4e077217, []int{0}
}
+
func (m *HealthCheckRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_HealthCheckRequest.Unmarshal(m, b)
}
func (m *HealthCheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_HealthCheckRequest.Marshal(b, m, deterministic)
}
-func (dst *HealthCheckRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HealthCheckRequest.Merge(dst, src)
+func (m *HealthCheckRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_HealthCheckRequest.Merge(m, src)
}
func (m *HealthCheckRequest) XXX_Size() int {
return xxx_messageInfo_HealthCheckRequest.Size(m)
@@ -101,16 +105,17 @@ func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} }
func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) }
func (*HealthCheckResponse) ProtoMessage() {}
func (*HealthCheckResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_health_6b1a06aa67f91efd, []int{1}
+ return fileDescriptor_e265fd9d4e077217, []int{1}
}
+
func (m *HealthCheckResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_HealthCheckResponse.Unmarshal(m, b)
}
func (m *HealthCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_HealthCheckResponse.Marshal(b, m, deterministic)
}
-func (dst *HealthCheckResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HealthCheckResponse.Merge(dst, src)
+func (m *HealthCheckResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_HealthCheckResponse.Merge(m, src)
}
func (m *HealthCheckResponse) XXX_Size() int {
return xxx_messageInfo_HealthCheckResponse.Size(m)
@@ -129,9 +134,34 @@ func (m *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus {
}
func init() {
+ proto.RegisterEnum("grpc.health.v1.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value)
proto.RegisterType((*HealthCheckRequest)(nil), "grpc.health.v1.HealthCheckRequest")
proto.RegisterType((*HealthCheckResponse)(nil), "grpc.health.v1.HealthCheckResponse")
- proto.RegisterEnum("grpc.health.v1.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value)
+}
+
+func init() { proto.RegisterFile("grpc/health/v1/health.proto", fileDescriptor_e265fd9d4e077217) }
+
+var fileDescriptor_e265fd9d4e077217 = []byte{
+ // 297 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48,
+ 0xd6, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0xd0, 0x2f, 0x33, 0x84, 0xb2, 0xf4, 0x0a, 0x8a, 0xf2,
+ 0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21, 0x0f,
+ 0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48, 0x82,
+ 0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08,
+ 0xc6, 0x55, 0xda, 0xc8, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0xc8,
+ 0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f, 0xd5,
+ 0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41, 0x0d,
+ 0x50, 0xf2, 0xe7, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3, 0x0f,
+ 0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85, 0xf8,
+ 0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x42, 0xc2, 0x5c, 0xfc, 0x60, 0x8e, 0xb3,
+ 0x6b, 0x3c, 0x4c, 0x0b, 0xb3, 0xd1, 0x3a, 0x46, 0x2e, 0x36, 0x88, 0xf5, 0x42, 0x01, 0x5c, 0xac,
+ 0x60, 0x27, 0x08, 0x29, 0xe1, 0x75, 0x1f, 0x38, 0x14, 0xa4, 0x94, 0x89, 0xf0, 0x83, 0x50, 0x10,
+ 0x17, 0x6b, 0x78, 0x62, 0x49, 0x72, 0x06, 0xd5, 0x4c, 0x34, 0x60, 0x74, 0x4a, 0xe4, 0x12, 0xcc,
+ 0xcc, 0x47, 0x53, 0xea, 0xc4, 0x0d, 0x51, 0x1b, 0x00, 0x8a, 0xc6, 0x00, 0xc6, 0x28, 0x9d, 0xf4,
+ 0xfc, 0xfc, 0xf4, 0x9c, 0x54, 0xbd, 0xf4, 0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0xbd, 0xfc, 0xa2, 0x74,
+ 0x7d, 0xe4, 0x78, 0x07, 0xb1, 0xe3, 0x21, 0xec, 0xf8, 0x32, 0xc3, 0x55, 0x4c, 0x7c, 0xee, 0x20,
+ 0xd3, 0x20, 0x46, 0xe8, 0x85, 0x19, 0x26, 0xb1, 0x81, 0x93, 0x83, 0x31, 0x20, 0x00, 0x00, 0xff,
+ 0xff, 0x12, 0x7d, 0x96, 0xcb, 0x2d, 0x02, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@@ -239,6 +269,17 @@ type HealthServer interface {
Watch(*HealthCheckRequest, Health_WatchServer) error
}
+// UnimplementedHealthServer can be embedded to have forward compatible implementations.
+type UnimplementedHealthServer struct {
+}
+
+func (*UnimplementedHealthServer) Check(ctx context.Context, req *HealthCheckRequest) (*HealthCheckResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Check not implemented")
+}
+func (*UnimplementedHealthServer) Watch(req *HealthCheckRequest, srv Health_WatchServer) error {
+ return status.Errorf(codes.Unimplemented, "method Watch not implemented")
+}
+
func RegisterHealthServer(s *grpc.Server, srv HealthServer) {
s.RegisterService(&_Health_serviceDesc, srv)
}
@@ -300,28 +341,3 @@ var _Health_serviceDesc = grpc.ServiceDesc{
},
Metadata: "grpc/health/v1/health.proto",
}
-
-func init() { proto.RegisterFile("grpc/health/v1/health.proto", fileDescriptor_health_6b1a06aa67f91efd) }
-
-var fileDescriptor_health_6b1a06aa67f91efd = []byte{
- // 297 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48,
- 0xd6, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0xd0, 0x2f, 0x33, 0x84, 0xb2, 0xf4, 0x0a, 0x8a, 0xf2,
- 0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21, 0x0f,
- 0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48, 0x82,
- 0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08,
- 0xc6, 0x55, 0xda, 0xc8, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0xc8,
- 0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f, 0xd5,
- 0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41, 0x0d,
- 0x50, 0xf2, 0xe7, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3, 0x0f,
- 0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85, 0xf8,
- 0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x42, 0xc2, 0x5c, 0xfc, 0x60, 0x8e, 0xb3,
- 0x6b, 0x3c, 0x4c, 0x0b, 0xb3, 0xd1, 0x3a, 0x46, 0x2e, 0x36, 0x88, 0xf5, 0x42, 0x01, 0x5c, 0xac,
- 0x60, 0x27, 0x08, 0x29, 0xe1, 0x75, 0x1f, 0x38, 0x14, 0xa4, 0x94, 0x89, 0xf0, 0x83, 0x50, 0x10,
- 0x17, 0x6b, 0x78, 0x62, 0x49, 0x72, 0x06, 0xd5, 0x4c, 0x34, 0x60, 0x74, 0x4a, 0xe4, 0x12, 0xcc,
- 0xcc, 0x47, 0x53, 0xea, 0xc4, 0x0d, 0x51, 0x1b, 0x00, 0x8a, 0xc6, 0x00, 0xc6, 0x28, 0x9d, 0xf4,
- 0xfc, 0xfc, 0xf4, 0x9c, 0x54, 0xbd, 0xf4, 0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0xbd, 0xfc, 0xa2, 0x74,
- 0x7d, 0xe4, 0x78, 0x07, 0xb1, 0xe3, 0x21, 0xec, 0xf8, 0x32, 0xc3, 0x55, 0x4c, 0x7c, 0xee, 0x20,
- 0xd3, 0x20, 0x46, 0xe8, 0x85, 0x19, 0x26, 0xb1, 0x81, 0x93, 0x83, 0x31, 0x20, 0x00, 0x00, 0xff,
- 0xff, 0x12, 0x7d, 0x96, 0xcb, 0x2d, 0x02, 0x00, 0x00,
-}
diff --git a/vendor/google.golang.org/grpc/health/server.go b/vendor/google.golang.org/grpc/health/server.go
index c79f9d2ab..2262607f8 100644
--- a/vendor/google.golang.org/grpc/health/server.go
+++ b/vendor/google.golang.org/grpc/health/server.go
@@ -35,7 +35,7 @@ import (
// Server implements `service Health`.
type Server struct {
- mu sync.Mutex
+ mu sync.RWMutex
// If shutdown is true, it's expected all serving status is NOT_SERVING, and
// will stay in NOT_SERVING.
shutdown bool
@@ -54,8 +54,8 @@ func NewServer() *Server {
// Check implements `service Health`.
func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
- s.mu.Lock()
- defer s.mu.Unlock()
+ s.mu.RLock()
+ defer s.mu.RUnlock()
if servingStatus, ok := s.statusMap[in.Service]; ok {
return &healthpb.HealthCheckResponse{
Status: servingStatus,
@@ -139,7 +139,7 @@ func (s *Server) setServingStatusLocked(service string, servingStatus healthpb.H
// Shutdown sets all serving status to NOT_SERVING, and configures the server to
// ignore all future status changes.
//
-// This changes serving status for all services. To set status for a perticular
+// This changes serving status for all services. To set status for a particular
// services, call SetServingStatus().
func (s *Server) Shutdown() {
s.mu.Lock()
@@ -153,7 +153,7 @@ func (s *Server) Shutdown() {
// Resume sets all serving status to SERVING, and configures the server to
// accept all future status changes.
//
-// This changes serving status for all services. To set status for a perticular
+// This changes serving status for all services. To set status for a particular
// services, call SetServingStatus().
func (s *Server) Resume() {
s.mu.Lock()
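
For completeness, a hedged sketch of wiring this health server into a gRPC server and toggling a (hypothetical) service's status:

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/health"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	lis, err := net.Listen("tcp", "localhost:50051")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	srv := grpc.NewServer()

	hs := health.NewServer()
	healthpb.RegisterHealthServer(srv, hs)
	// Mark an illustrative service as serving; Shutdown() would flip every
	// service to NOT_SERVING and ignore further status changes until Resume().
	hs.SetServingStatus("example.EchoService", healthpb.HealthCheckResponse_SERVING)

	if err := srv.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}
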
diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
index 1bd0cce5a..5fc0ee3da 100644
--- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go
+++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
@@ -25,44 +25,39 @@ package backoff
import (
"time"
+ grpcbackoff "google.golang.org/grpc/backoff"
"google.golang.org/grpc/internal/grpcrand"
)
// Strategy defines the methodology for backing off after a grpc connection
// failure.
-//
type Strategy interface {
// Backoff returns the amount of time to wait before the next retry given
// the number of consecutive failures.
Backoff(retries int) time.Duration
}
-const (
- // baseDelay is the amount of time to wait before retrying after the first
- // failure.
- baseDelay = 1.0 * time.Second
- // factor is applied to the backoff after each retry.
- factor = 1.6
- // jitter provides a range to randomize backoff delays.
- jitter = 0.2
-)
+// DefaultExponential is an exponential backoff implementation using the
+// default values for all the configurable knobs defined in
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+var DefaultExponential = Exponential{Config: grpcbackoff.DefaultConfig}
// Exponential implements exponential backoff algorithm as defined in
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
type Exponential struct {
- // MaxDelay is the upper bound of backoff delay.
- MaxDelay time.Duration
+ // Config contains all options to configure the backoff algorithm.
+ Config grpcbackoff.Config
}
// Backoff returns the amount of time to wait before the next retry given the
// number of retries.
func (bc Exponential) Backoff(retries int) time.Duration {
if retries == 0 {
- return baseDelay
+ return bc.Config.BaseDelay
}
- backoff, max := float64(baseDelay), float64(bc.MaxDelay)
+ backoff, max := float64(bc.Config.BaseDelay), float64(bc.Config.MaxDelay)
for backoff < max && retries > 0 {
- backoff *= factor
+ backoff *= bc.Config.Multiplier
retries--
}
if backoff > max {
@@ -70,7 +65,7 @@ func (bc Exponential) Backoff(retries int) time.Duration {
}
// Randomize backoff delays so that if a cluster of requests start at
// the same time, they won't operate in lockstep.
- backoff *= 1 + jitter*(grpcrand.Float64()*2-1)
+ backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1)
if backoff < 0 {
return 0
}
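
To make the growth curve concrete, a small standalone sketch that mirrors the loop above using the public backoff.Config (jitter omitted so the output is deterministic); it is illustrative only, since this package is internal to gRPC.

package main

import (
	"fmt"
	"time"

	"google.golang.org/grpc/backoff"
)

// expBackoff reproduces the capped exponential growth shown above, without jitter.
func expBackoff(cfg backoff.Config, retries int) time.Duration {
	if retries == 0 {
		return cfg.BaseDelay
	}
	cur, max := float64(cfg.BaseDelay), float64(cfg.MaxDelay)
	for cur < max && retries > 0 {
		cur *= cfg.Multiplier
		retries--
	}
	if cur > max {
		cur = max
	}
	return time.Duration(cur)
}

func main() {
	// With the defaults (1s base, 1.6x multiplier, 120s cap) this prints roughly
	// 1s, 1.6s, 2.56s, 4.1s, ... until the cap is reached.
	for i := 0; i <= 6; i++ {
		fmt.Printf("retry %d -> %v\n", i, expBackoff(backoff.DefaultConfig, i))
	}
}
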
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
index fee6aecd0..8b1051674 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
@@ -34,7 +34,7 @@ type Logger interface {
}
// binLogger is the global binary logger for the binary. One of this should be
-// built at init time from the configuration (environment varialbe or flags).
+// built at init time from the configuration (environment variable or flags).
//
// It is used to get a methodLogger for each individual method.
var binLogger Logger
@@ -98,7 +98,7 @@ func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error {
// New methodLogger with same service overrides the old one.
func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error {
if _, ok := l.services[service]; ok {
- return fmt.Errorf("conflicting rules for service %v found", service)
+ return fmt.Errorf("conflicting service rules for service %v found", service)
}
if l.services == nil {
l.services = make(map[string]*methodLoggerConfig)
@@ -112,10 +112,10 @@ func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig)
// New methodLogger with same method overrides the old one.
func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error {
if _, ok := l.blacklist[method]; ok {
- return fmt.Errorf("conflicting rules for method %v found", method)
+ return fmt.Errorf("conflicting blacklist rules for method %v found", method)
}
if _, ok := l.methods[method]; ok {
- return fmt.Errorf("conflicting rules for method %v found", method)
+ return fmt.Errorf("conflicting method rules for method %v found", method)
}
if l.methods == nil {
l.methods = make(map[string]*methodLoggerConfig)
@@ -127,10 +127,10 @@ func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) er
// Set blacklist method for "-service/method".
func (l *logger) setBlacklist(method string) error {
if _, ok := l.blacklist[method]; ok {
- return fmt.Errorf("conflicting rules for method %v found", method)
+ return fmt.Errorf("conflicting blacklist rules for method %v found", method)
}
if _, ok := l.methods[method]; ok {
- return fmt.Errorf("conflicting rules for method %v found", method)
+ return fmt.Errorf("conflicting method rules for method %v found", method)
}
if l.blacklist == nil {
l.blacklist = make(map[string]struct{})
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
index 4cc2525df..be30d0e65 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
@@ -43,7 +43,7 @@ import (
// Foo.
//
// If two configs exist for one certain method or service, the one specified
-// later overrides the privous config.
+// later overrides the previous config.
func NewLoggerFromConfigString(s string) Logger {
if s == "" {
return nil
@@ -74,7 +74,7 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error {
return fmt.Errorf("invalid config: %q, %v", config, err)
}
if m == "*" {
- return fmt.Errorf("invalid config: %q, %v", config, "* not allowd in blacklist config")
+ return fmt.Errorf("invalid config: %q, %v", config, "* not allowed in blacklist config")
}
if suffix != "" {
return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config")
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
index 20d044f0f..a2e7c346d 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
@@ -63,7 +63,7 @@ func (ns *noopSink) Close() error { return nil }
// newWriterSink creates a binary log sink with the given writer.
//
-// Write() marshalls the proto message and writes it to the given writer. Each
+// Write() marshals the proto message and writes it to the given writer. Each
// message is prefixed with a 4 byte big endian unsigned integer as the length.
//
// No buffer is done, Close() doesn't try to close the writer.
diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
new file mode 100644
index 000000000..9f6a0c120
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package buffer provides an implementation of an unbounded buffer.
+package buffer
+
+import "sync"
+
+// Unbounded is an implementation of an unbounded buffer which does not use
+// extra goroutines. This is typically used for passing updates from one entity
+// to another within gRPC.
+//
+// All methods on this type are thread-safe and don't block on anything except
+// the underlying mutex used for synchronization.
+//
+// Unbounded supports values of any type to be stored in it by using a channel
+// of `interface{}`. This means that a call to Put() incurs an extra memory
+// allocation, and also that users need a type assertion while reading. For
+// performance critical code paths, using Unbounded is strongly discouraged and
+// defining a new type specific implementation of this buffer is preferred. See
+// internal/transport/transport.go for an example of this.
+type Unbounded struct {
+ c chan interface{}
+ mu sync.Mutex
+ backlog []interface{}
+}
+
+// NewUnbounded returns a new instance of Unbounded.
+func NewUnbounded() *Unbounded {
+ return &Unbounded{c: make(chan interface{}, 1)}
+}
+
+// Put adds t to the unbounded buffer.
+func (b *Unbounded) Put(t interface{}) {
+ b.mu.Lock()
+ if len(b.backlog) == 0 {
+ select {
+ case b.c <- t:
+ b.mu.Unlock()
+ return
+ default:
+ }
+ }
+ b.backlog = append(b.backlog, t)
+ b.mu.Unlock()
+}
+
+// Load sends the earliest buffered data, if any, onto the read channel
+// returned by Get(). Users are expected to call this every time they read a
+// value from the read channel.
+func (b *Unbounded) Load() {
+ b.mu.Lock()
+ if len(b.backlog) > 0 {
+ select {
+ case b.c <- b.backlog[0]:
+ b.backlog[0] = nil
+ b.backlog = b.backlog[1:]
+ default:
+ }
+ }
+ b.mu.Unlock()
+}
+
+// Get returns a read channel on which values added to the buffer, via Put(),
+// are sent on.
+//
+// Upon reading a value from this channel, users are expected to call Load() to
+// send the next buffered value onto the channel if there is any.
+func (b *Unbounded) Get() <-chan interface{} {
+ return b.c
+}
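
A hedged sketch of the consumption pattern documented above, written as if it lived in this same buffer package: read from Get(), then call Load() to release the next backlog entry.

package buffer

import "fmt"

// consume drains an Unbounded until done is closed, following the Get/Load
// contract: every receive from Get() is paired with a call to Load().
func consume(b *Unbounded, done <-chan struct{}) {
	for {
		select {
		case v := <-b.Get():
			fmt.Println("update:", v)
			b.Load()
		case <-done:
			return
		}
	}
}
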
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
index 3ee8740f1..ae6c8972f 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -25,11 +25,14 @@ import (
)
const (
- prefix = "GRPC_GO_"
- retryStr = prefix + "RETRY"
+ prefix = "GRPC_GO_"
+ retryStr = prefix + "RETRY"
+ txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS"
)
var (
// Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on".
Retry = strings.EqualFold(os.Getenv(retryStr), "on")
+ // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
+	TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false")
)
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index bc1f99ac8..0912f0bf4 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -28,9 +28,7 @@ import (
)
var (
- // WithResolverBuilder is exported by dialoptions.go
- WithResolverBuilder interface{} // func (resolver.Builder) grpc.DialOption
- // WithHealthCheckFunc is not exported by dialoptions.go
+ // WithHealthCheckFunc is set by dialoptions.go
WithHealthCheckFunc interface{} // func (HealthChecker) DialOption
// HealthCheckFunc is used to provide client-side LB channel health checking
HealthCheckFunc HealthChecker
@@ -39,14 +37,17 @@ var (
// KeepaliveMinPingTime is the minimum ping interval. This must be 10s by
// default, but tests may wish to set it lower for convenience.
KeepaliveMinPingTime = 10 * time.Second
- // ParseServiceConfig is a function to parse JSON service configs into
- // opaque data structures.
- ParseServiceConfig func(sc string) (interface{}, error)
// StatusRawProto is exported by status/status.go. This func returns a
// pointer to the wrapped Status proto for a given status.Status without a
// call to proto.Clone(). The returned Status proto should not be mutated by
// the caller.
StatusRawProto interface{} // func (*status.Status) *spb.Status
+ // NewRequestInfoContext creates a new context based on the argument context attaching
+ // the passed in RequestInfo to the new context.
+ NewRequestInfoContext interface{} // func(context.Context, credentials.RequestInfo) context.Context
+ // ParseServiceConfigForTesting is for creating a fake
+ // ClientConn for resolver testing only
+ ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult
)
// HealthChecker defines the signature of the client-side LB channel health checking function.
@@ -57,7 +58,7 @@ var (
//
// The health checking protocol is defined at:
// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
-type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State), serviceName string) error
+type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error
const (
// CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.
diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
similarity index 72%
rename from vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
rename to vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
index 297492e87..c368db62e 100644
--- a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
@@ -33,18 +33,22 @@ import (
"time"
"google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal/backoff"
+ "google.golang.org/grpc/internal/envconfig"
"google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/resolver"
+ "google.golang.org/grpc/serviceconfig"
)
+// EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB
+// addresses from SRV records. Must not be changed after init time.
+var EnableSRVLookups = false
+
func init() {
resolver.Register(NewBuilder())
}
const (
defaultPort = "443"
- defaultFreq = time.Minute * 30
defaultDNSSvrPort = "53"
golang = "GO"
// txtPrefix is the prefix string to be prepended to the host name for txt record lookup.
@@ -94,47 +98,33 @@ var customAuthorityResolver = func(authority string) (netResolver, error) {
// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers.
func NewBuilder() resolver.Builder {
- return &dnsBuilder{minFreq: defaultFreq}
+ return &dnsBuilder{}
}
-type dnsBuilder struct {
- // minimum frequency of polling the DNS server.
- minFreq time.Duration
-}
+type dnsBuilder struct{}
// Build creates and starts a DNS resolver that watches the name resolution of the target.
-func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
+func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
host, port, err := parseTarget(target.Endpoint, defaultPort)
if err != nil {
return nil, err
}
// IP address.
- if net.ParseIP(host) != nil {
- host, _ = formatIP(host)
- addr := []resolver.Address{{Addr: host + ":" + port}}
- i := &ipResolver{
- cc: cc,
- ip: addr,
- rn: make(chan struct{}, 1),
- q: make(chan struct{}),
- }
- cc.NewAddress(addr)
- go i.watcher()
- return i, nil
+ if ipAddr, ok := formatIP(host); ok {
+ addr := []resolver.Address{{Addr: ipAddr + ":" + port}}
+ cc.UpdateState(resolver.State{Addresses: addr})
+ return deadResolver{}, nil
}
// DNS address (non-IP).
ctx, cancel := context.WithCancel(context.Background())
d := &dnsResolver{
- freq: b.minFreq,
- backoff: backoff.Exponential{MaxDelay: b.minFreq},
host: host,
port: port,
ctx: ctx,
cancel: cancel,
cc: cc,
- t: time.NewTimer(0),
rn: make(chan struct{}, 1),
disableServiceConfig: opts.DisableServiceConfig,
}
@@ -150,6 +140,7 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts
d.wg.Add(1)
go d.watcher()
+ d.ResolveNow(resolver.ResolveNowOptions{})
return d, nil
}
@@ -164,53 +155,23 @@ type netResolver interface {
LookupTXT(ctx context.Context, name string) (txts []string, err error)
}
-// ipResolver watches for the name resolution update for an IP address.
-type ipResolver struct {
- cc resolver.ClientConn
- ip []resolver.Address
- // rn channel is used by ResolveNow() to force an immediate resolution of the target.
- rn chan struct{}
- q chan struct{}
-}
-
-// ResolveNow resend the address it stores, no resolution is needed.
-func (i *ipResolver) ResolveNow(opt resolver.ResolveNowOption) {
- select {
- case i.rn <- struct{}{}:
- default:
- }
-}
+// deadResolver is a resolver that does nothing.
+type deadResolver struct{}
-// Close closes the ipResolver.
-func (i *ipResolver) Close() {
- close(i.q)
-}
+func (deadResolver) ResolveNow(resolver.ResolveNowOptions) {}
-func (i *ipResolver) watcher() {
- for {
- select {
- case <-i.rn:
- i.cc.NewAddress(i.ip)
- case <-i.q:
- return
- }
- }
-}
+func (deadResolver) Close() {}
// dnsResolver watches for the name resolution update for a non-IP target.
type dnsResolver struct {
- freq time.Duration
- backoff backoff.Exponential
- retryCount int
- host string
- port string
- resolver netResolver
- ctx context.Context
- cancel context.CancelFunc
- cc resolver.ClientConn
+ host string
+ port string
+ resolver netResolver
+ ctx context.Context
+ cancel context.CancelFunc
+ cc resolver.ClientConn
// rn channel is used by ResolveNow() to force an immediate resolution of the target.
rn chan struct{}
- t *time.Timer
// wg is used to enforce Close() to return after the watcher() goroutine has finished.
// Otherwise, data race will be possible. [Race Example] in dns_resolver_test we
// replace the real lookup functions with mocked ones to facilitate testing.
@@ -222,7 +183,7 @@ type dnsResolver struct {
}
// ResolveNow invokes an immediate resolution of the target that this dnsResolver watches.
-func (d *dnsResolver) ResolveNow(opt resolver.ResolveNowOption) {
+func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) {
select {
case d.rn <- struct{}{}:
default:
@@ -233,7 +194,6 @@ func (d *dnsResolver) ResolveNow(opt resolver.ResolveNowOption) {
func (d *dnsResolver) Close() {
d.cancel()
d.wg.Wait()
- d.t.Stop()
}
func (d *dnsResolver) watcher() {
@@ -242,27 +202,15 @@ func (d *dnsResolver) watcher() {
select {
case <-d.ctx.Done():
return
- case <-d.t.C:
case <-d.rn:
- if !d.t.Stop() {
- // Before resetting a timer, it should be stopped to prevent racing with
- // reads on it's channel.
- <-d.t.C
- }
}
- result, sc := d.lookup()
- // Next lookup should happen within an interval defined by d.freq. It may be
- // more often due to exponential retry on empty address list.
- if len(result) == 0 {
- d.retryCount++
- d.t.Reset(d.backoff.Backoff(d.retryCount))
+ state, err := d.lookup()
+ if err != nil {
+ d.cc.ReportError(err)
} else {
- d.retryCount = 0
- d.t.Reset(d.freq)
+ d.cc.UpdateState(*state)
}
- d.cc.NewServiceConfig(sc)
- d.cc.NewAddress(result)
// Sleep to prevent excessive re-resolutions. Incoming resolution requests
// will be queued in d.rn.
@@ -276,37 +224,68 @@ func (d *dnsResolver) watcher() {
}
}
-func (d *dnsResolver) lookupSRV() []resolver.Address {
+func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) {
+ if !EnableSRVLookups {
+ return nil, nil
+ }
var newAddrs []resolver.Address
_, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host)
if err != nil {
- grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err)
- return nil
+ err = handleDNSError(err, "SRV") // may become nil
+ return nil, err
}
for _, s := range srvs {
lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target)
if err != nil {
- grpclog.Infof("grpc: failed load balancer address dns lookup due to %v.\n", err)
- continue
+ err = handleDNSError(err, "A") // may become nil
+ if err == nil {
+ // If there are other SRV records, look them up and ignore this
+ // one that does not exist.
+ continue
+ }
+ return nil, err
}
for _, a := range lbAddrs {
- a, ok := formatIP(a)
+ ip, ok := formatIP(a)
if !ok {
- grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
- continue
+ return nil, fmt.Errorf("dns: error parsing A record IP address %v", a)
}
- addr := a + ":" + strconv.Itoa(int(s.Port))
+ addr := ip + ":" + strconv.Itoa(int(s.Port))
newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target})
}
}
- return newAddrs
+ return newAddrs, nil
+}
+
+var filterError = func(err error) error {
+ if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
+ // Timeouts and temporary errors should be communicated to gRPC to
+ // attempt another DNS query (with backoff). Other errors should be
+ // suppressed (they may represent the absence of a TXT record).
+ return nil
+ }
+ return err
+}
+
+func handleDNSError(err error, lookupType string) error {
+ err = filterError(err)
+ if err != nil {
+ err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err)
+ grpclog.Infoln(err)
+ }
+ return err
}
-func (d *dnsResolver) lookupTXT() string {
+func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult {
ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host)
if err != nil {
- grpclog.Infof("grpc: failed dns TXT record lookup due to %v.\n", err)
- return ""
+ if envconfig.TXTErrIgnore {
+ return nil
+ }
+ if err = handleDNSError(err, "TXT"); err != nil {
+ return &serviceconfig.ParseResult{Err: err}
+ }
+ return nil
}
var res string
for _, s := range ss {
@@ -315,40 +294,45 @@ func (d *dnsResolver) lookupTXT() string {
// TXT record must have "grpc_config=" attribute in order to be used as service config.
if !strings.HasPrefix(res, txtAttribute) {
- grpclog.Warningf("grpc: TXT record %v missing %v attribute", res, txtAttribute)
- return ""
+ grpclog.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute)
+ // This is not an error; it is the equivalent of not having a service config.
+ return nil
}
- return strings.TrimPrefix(res, txtAttribute)
+ sc := canaryingSC(strings.TrimPrefix(res, txtAttribute))
+ return d.cc.ParseServiceConfig(sc)
}
-func (d *dnsResolver) lookupHost() []resolver.Address {
+func (d *dnsResolver) lookupHost() ([]resolver.Address, error) {
var newAddrs []resolver.Address
addrs, err := d.resolver.LookupHost(d.ctx, d.host)
if err != nil {
- grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err)
- return nil
+ err = handleDNSError(err, "A")
+ return nil, err
}
for _, a := range addrs {
- a, ok := formatIP(a)
+ ip, ok := formatIP(a)
if !ok {
- grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
- continue
+ return nil, fmt.Errorf("dns: error parsing A record IP address %v", a)
}
- addr := a + ":" + d.port
+ addr := ip + ":" + d.port
newAddrs = append(newAddrs, resolver.Address{Addr: addr})
}
- return newAddrs
+ return newAddrs, nil
}
-func (d *dnsResolver) lookup() ([]resolver.Address, string) {
- newAddrs := d.lookupSRV()
- // Support fallback to non-balancer address.
- newAddrs = append(newAddrs, d.lookupHost()...)
- if d.disableServiceConfig {
- return newAddrs, ""
+func (d *dnsResolver) lookup() (*resolver.State, error) {
+ srv, srvErr := d.lookupSRV()
+ addrs, hostErr := d.lookupHost()
+ if hostErr != nil && (srvErr != nil || len(srv) == 0) {
+ return nil, hostErr
+ }
+ state := &resolver.State{
+ Addresses: append(addrs, srv...),
+ }
+ if !d.disableServiceConfig {
+ state.ServiceConfig = d.lookupTXT()
}
- sc := d.lookupTXT()
- return newAddrs, canaryingSC(sc)
+ return state, nil
}
// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
@@ -434,12 +418,12 @@ func canaryingSC(js string) string {
var rcs []rawChoice
err := json.Unmarshal([]byte(js), &rcs)
if err != nil {
- grpclog.Warningf("grpc: failed to parse service config json string due to %v.\n", err)
+ grpclog.Warningf("dns: error parsing service config json: %v", err)
return ""
}
cliHostname, err := os.Hostname()
if err != nil {
- grpclog.Warningf("grpc: failed to get client hostname due to %v.\n", err)
+ grpclog.Warningf("dns: error getting client hostname: %v", err)
return ""
}
var sc string
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go b/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go
new file mode 100644
index 000000000..8783a8cf8
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go
@@ -0,0 +1,33 @@
+// +build go1.13
+
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package dns
+
+import "net"
+
+func init() {
+ filterError = func(err error) error {
+ if dnsErr, ok := err.(*net.DNSError); ok && dnsErr.IsNotFound {
+ // The name does not exist; not an error.
+ return nil
+ }
+ return err
+ }
+}
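Reviewer note: the new go113.go file overrides filterError so that, on Go 1.13+, a lookup for a name that simply does not exist is ignored rather than reported as an error. A self-contained sketch of the same check follows (illustrative only; filterNotFound is a hypothetical name).

package main

import (
	"fmt"
	"net"
)

// filterNotFound mirrors the go1.13-specific override added above: a
// *net.DNSError with IsNotFound set means the record is absent, which is
// not an error; everything else is surfaced so gRPC can retry with backoff.
func filterNotFound(err error) error {
	if dnsErr, ok := err.(*net.DNSError); ok && dnsErr.IsNotFound {
		return nil
	}
	return err
}

func main() {
	missing := &net.DNSError{Err: "no such host", Name: "example.invalid", IsNotFound: true}
	timeout := &net.DNSError{Err: "i/o timeout", Name: "example.invalid", IsTimeout: true}

	fmt.Println(filterNotFound(missing)) // <nil>: treated as "no record"
	fmt.Println(filterNotFound(timeout)) // surfaced for retry with backoff
}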
diff --git a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
similarity index 94%
rename from vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go
rename to vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
index 893d5d12c..520d9229e 100644
--- a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
@@ -26,7 +26,7 @@ const scheme = "passthrough"
type passthroughBuilder struct{}
-func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
+func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
r := &passthroughResolver{
target: target,
cc: cc,
@@ -48,7 +48,7 @@ func (r *passthroughResolver) start() {
r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}})
}
-func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOption) {}
+func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {}
func (*passthroughResolver) Close() {}
diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
index b8e0aa4db..ddee20b6b 100644
--- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
+++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
@@ -107,8 +107,8 @@ func (*registerStream) isTransportResponseFrame() bool { return false }
type headerFrame struct {
streamID uint32
hf []hpack.HeaderField
- endStream bool // Valid on server side.
- initStream func(uint32) (bool, error) // Used only on the client side.
+ endStream bool // Valid on server side.
+ initStream func(uint32) error // Used only on the client side.
onWrite func()
wq *writeQuota // write quota for the stream created.
cleanup *cleanupStream // Valid on the server side.
@@ -637,21 +637,17 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error {
func (l *loopyWriter) originateStream(str *outStream) error {
hdr := str.itl.dequeue().(*headerFrame)
- sendPing, err := hdr.initStream(str.id)
- if err != nil {
+ if err := hdr.initStream(str.id); err != nil {
if err == ErrConnClosing {
return err
}
// Other errors(errStreamDrain) need not close transport.
return nil
}
- if err = l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
+ if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
return err
}
l.estdStreams[str.id] = str
- if sendPing {
- return l.pingHandler(&ping{data: [8]byte{}})
- }
return nil
}
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
index 78f9ddc3d..c3c32dafe 100644
--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -227,7 +227,9 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
if err == nil { // transport has not been closed
if ht.stats != nil {
- ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
+ ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{
+ Trailer: s.trailer.Copy(),
+ })
}
}
ht.Close()
@@ -289,7 +291,9 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
if err == nil {
if ht.stats != nil {
- ht.stats.HandleRPC(s.Context(), &stats.OutHeader{})
+ ht.stats.HandleRPC(s.Context(), &stats.OutHeader{
+ Header: md.Copy(),
+ })
}
}
return err
@@ -334,7 +338,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
Addr: ht.RemoteAddr(),
}
if req.TLS != nil {
- pr.AuthInfo = credentials.TLSInfo{State: *req.TLS}
+ pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{credentials.PrivacyAndIntegrity}}
}
ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
s.ctx = peer.NewContext(ctx, pr)
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
index 41a79c567..2d6feeb1b 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -35,6 +35,7 @@ import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/syscall"
"google.golang.org/grpc/keepalive"
@@ -44,8 +45,14 @@ import (
"google.golang.org/grpc/status"
)
+// clientConnectionCounter counts the number of connections a client has
+// initiated (equal to the number of http2Clients created). Must be accessed
+// atomically.
+var clientConnectionCounter uint64
+
// http2Client implements the ClientTransport interface with HTTP2.
type http2Client struct {
+ lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
ctx context.Context
cancel context.CancelFunc
ctxDone <-chan struct{} // Cache the ctx.Done() chan.
@@ -62,8 +69,6 @@ type http2Client struct {
// goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor)
// that the server sent GoAway on this transport.
goAway chan struct{}
- // awakenKeepalive is used to wake up keepalive when after it has gone dormant.
- awakenKeepalive chan struct{}
framer *framer
// controlBuf delivers all the control related tasks (e.g., window
@@ -77,9 +82,6 @@ type http2Client struct {
perRPCCreds []credentials.PerRPCCredentials
- // Boolean to keep track of reading activity on transport.
- // 1 is true and 0 is false.
- activity uint32 // Accessed atomically.
kp keepalive.ClientParameters
keepaliveEnabled bool
@@ -110,6 +112,16 @@ type http2Client struct {
// goAwayReason records the http2.ErrCode and debug data received with the
// GoAway frame.
goAwayReason GoAwayReason
+ // A condition variable used to signal when the keepalive goroutine should
+ // go dormant. The condition for dormancy is based on the number of active
+ // streams and the `PermitWithoutStream` keepalive client parameter. And
+ // since the number of active streams is guarded by the above mutex, we use
+ // the same for this condition variable as well.
+ kpDormancyCond *sync.Cond
+ // A boolean to track whether the keepalive goroutine is dormant or not.
+ // This is checked before attempting to signal the above condition
+ // variable.
+ kpDormant bool
// Fields below are for channelz metric collection.
channelzID int64 // channelz unique identification number
@@ -119,6 +131,8 @@ type http2Client struct {
onClose func()
bufferPool *bufferPool
+
+ connectionID uint64
}
func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) {
@@ -232,7 +246,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
readerDone: make(chan struct{}),
writerDone: make(chan struct{}),
goAway: make(chan struct{}),
- awakenKeepalive: make(chan struct{}, 1),
framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize),
fc: &trInFlow{limit: uint32(icwz)},
scheme: scheme,
@@ -264,9 +277,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
updateFlowControl: t.updateFlowControl,
}
}
- // Make sure awakenKeepalive can't be written upon.
- // keepalive routine will make it writable, if need be.
- t.awakenKeepalive <- struct{}{}
if t.statsHandler != nil {
t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{
RemoteAddr: t.remoteAddr,
@@ -281,6 +291,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr))
}
if t.keepaliveEnabled {
+ t.kpDormancyCond = sync.NewCond(&t.mu)
go t.keepalive()
}
// Start the reader goroutine for incoming message. Each transport has
@@ -325,6 +336,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
}
}
+ t.connectionID = atomic.AddUint64(&clientConnectionCounter, 1)
+
if err := t.framer.writer.Flush(); err != nil {
return nil, err
}
@@ -347,6 +360,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
// TODO(zhaoq): Handle uint32 overflow of Stream.id.
s := &Stream{
+ ct: t,
done: make(chan struct{}),
method: callHdr.Method,
sendCompress: callHdr.SendCompress,
@@ -380,23 +394,24 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
}
func (t *http2Client) getPeer() *peer.Peer {
- pr := &peer.Peer{
- Addr: t.remoteAddr,
+ return &peer.Peer{
+ Addr: t.remoteAddr,
+ AuthInfo: t.authInfo,
}
- // Attach Auth info if there is any.
- if t.authInfo != nil {
- pr.AuthInfo = t.authInfo
- }
- return pr
}
func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) {
aud := t.createAudience(callHdr)
- authData, err := t.getTrAuthData(ctx, aud)
+ ri := credentials.RequestInfo{
+ Method: callHdr.Method,
+ AuthInfo: t.authInfo,
+ }
+ ctxWithRequestInfo := internal.NewRequestInfoContext.(func(context.Context, credentials.RequestInfo) context.Context)(ctx, ri)
+ authData, err := t.getTrAuthData(ctxWithRequestInfo, aud)
if err != nil {
return nil, err
}
- callAuthData, err := t.getCallAuthData(ctx, aud, callHdr)
+ callAuthData, err := t.getCallAuthData(ctxWithRequestInfo, aud, callHdr)
if err != nil {
return nil, err
}
@@ -419,6 +434,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
if callHdr.SendCompress != "" {
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: callHdr.SendCompress})
}
if dl, ok := ctx.Deadline(); ok {
// Send out timeout regardless its value. The server can detect timeout context by itself.
@@ -564,7 +580,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
hdr := &headerFrame{
hf: headerFields,
endStream: false,
- initStream: func(id uint32) (bool, error) {
+ initStream: func(id uint32) error {
t.mu.Lock()
if state := t.state; state != reachable {
t.mu.Unlock()
@@ -574,29 +590,19 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
err = ErrConnClosing
}
cleanup(err)
- return false, err
+ return err
}
t.activeStreams[id] = s
if channelz.IsOn() {
atomic.AddInt64(&t.czData.streamsStarted, 1)
atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
}
- var sendPing bool
- // If the number of active streams change from 0 to 1, then check if keepalive
- // has gone dormant. If so, wake it up.
- if len(t.activeStreams) == 1 && t.keepaliveEnabled {
- select {
- case t.awakenKeepalive <- struct{}{}:
- sendPing = true
- // Fill the awakenKeepalive channel again as this channel must be
- // kept non-writable except at the point that the keepalive()
- // goroutine is waiting either to be awaken or shutdown.
- t.awakenKeepalive <- struct{}{}
- default:
- }
+ // If the keepalive goroutine has gone dormant, wake it up.
+ if t.kpDormant {
+ t.kpDormancyCond.Signal()
}
t.mu.Unlock()
- return sendPing, nil
+ return nil
},
onOrphaned: cleanup,
wq: s.wq,
@@ -674,12 +680,14 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
}
}
if t.statsHandler != nil {
+ header, _, _ := metadata.FromOutgoingContextRaw(ctx)
outHeader := &stats.OutHeader{
Client: true,
FullMethod: callHdr.Method,
RemoteAddr: t.remoteAddr,
LocalAddr: t.localAddr,
Compression: callHdr.SendCompress,
+ Header: header.Copy(),
}
t.statsHandler.HandleRPC(s.ctx, outHeader)
}
@@ -778,6 +786,11 @@ func (t *http2Client) Close() error {
t.state = closing
streams := t.activeStreams
t.activeStreams = nil
+ if t.kpDormant {
+ // If the keepalive goroutine is blocked on this condition variable, we
+ // should unblock it so that the goroutine eventually exits.
+ t.kpDormancyCond.Signal()
+ }
t.mu.Unlock()
t.controlBuf.finish()
t.cancel()
@@ -853,11 +866,11 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
return t.controlBuf.put(df)
}
-func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) {
+func (t *http2Client) getStream(f http2.Frame) *Stream {
t.mu.Lock()
- defer t.mu.Unlock()
- s, ok := t.activeStreams[f.Header().StreamID]
- return s, ok
+ s := t.activeStreams[f.Header().StreamID]
+ t.mu.Unlock()
+ return s
}
// adjustWindow sends out extra window update over the initial window size
@@ -937,8 +950,8 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
t.controlBuf.put(bdpPing)
}
// Select the right stream to dispatch.
- s, ok := t.getStream(f)
- if !ok {
+ s := t.getStream(f)
+ if s == nil {
return
}
if size > 0 {
@@ -969,8 +982,8 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
}
func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
- s, ok := t.getStream(f)
- if !ok {
+ s := t.getStream(f)
+ if s == nil {
return
}
if f.ErrCode == http2.ErrCodeRefusedStream {
@@ -1147,8 +1160,8 @@ func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
// operateHeaders takes action on the decoded headers.
func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
- s, ok := t.getStream(frame)
- if !ok {
+ s := t.getStream(frame)
+ if s == nil {
return
}
endStream := frame.StreamEnded()
@@ -1177,12 +1190,14 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
inHeader := &stats.InHeader{
Client: true,
WireLength: int(frame.Header().Length),
+ Header: s.header.Copy(),
}
t.statsHandler.HandleRPC(s.ctx, inHeader)
} else {
inTrailer := &stats.InTrailer{
Client: true,
WireLength: int(frame.Header().Length),
+ Trailer: s.trailer.Copy(),
}
t.statsHandler.HandleRPC(s.ctx, inTrailer)
}
@@ -1191,6 +1206,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
// If headerChan hasn't been closed yet
if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
+ s.headerValid = true
if !endStream {
// HEADERS frame block carries a Response-Headers.
isHeader = true
@@ -1233,7 +1249,7 @@ func (t *http2Client) reader() {
}
t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!)
if t.keepaliveEnabled {
- atomic.CompareAndSwapUint32(&t.activity, 0, 1)
+ atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
}
sf, ok := frame.(*http2.SettingsFrame)
if !ok {
@@ -1248,7 +1264,7 @@ func (t *http2Client) reader() {
t.controlBuf.throttle()
frame, err := t.framer.fr.ReadFrame()
if t.keepaliveEnabled {
- atomic.CompareAndSwapUint32(&t.activity, 0, 1)
+ atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
}
if err != nil {
// Abort an active stream if the http2.Framer returns a
@@ -1292,56 +1308,83 @@ func (t *http2Client) reader() {
}
}
+func minTime(a, b time.Duration) time.Duration {
+ if a < b {
+ return a
+ }
+ return b
+}
+
// keepalive running in a separate goroutine makes sure the connection is alive by sending pings.
func (t *http2Client) keepalive() {
p := &ping{data: [8]byte{}}
+ // True iff a ping has been sent, and no data has been received since then.
+ outstandingPing := false
+ // Amount of time remaining before which we should receive an ACK for the
+ // last sent ping.
+ timeoutLeft := time.Duration(0)
+ // Records the last value of t.lastRead before we go block on the timer.
+ // This is required to check for read activity since then.
+ prevNano := time.Now().UnixNano()
timer := time.NewTimer(t.kp.Time)
for {
select {
case <-timer.C:
- if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
- timer.Reset(t.kp.Time)
+ lastRead := atomic.LoadInt64(&t.lastRead)
+ if lastRead > prevNano {
+ // There has been read activity since the last time we were here.
+ outstandingPing = false
+ // Next timer should fire at kp.Time seconds from lastRead time.
+ timer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano()))
+ prevNano = lastRead
continue
}
- // Check if keepalive should go dormant.
+ if outstandingPing && timeoutLeft <= 0 {
+ t.Close()
+ return
+ }
t.mu.Lock()
- if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream {
- // Make awakenKeepalive writable.
- <-t.awakenKeepalive
- t.mu.Unlock()
- select {
- case <-t.awakenKeepalive:
- // If the control gets here a ping has been sent
- // need to reset the timer with keepalive.Timeout.
- case <-t.ctx.Done():
- return
- }
- } else {
+ if t.state == closing {
+ // If the transport is closing, we should exit from the
+ // keepalive goroutine here. If not, we could have a race
+ // between the call to Signal() from Close() and the call to
+ // Wait() here, whereby the keepalive goroutine ends up
+ // blocking on the condition variable which will never be
+ // signalled again.
t.mu.Unlock()
+ return
+ }
+ if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream {
+ // If a ping was sent out previously (because there were active
+ // streams at that point) which wasn't acked and its timeout
+ // hadn't fired, but we got here and are about to go dormant,
+ // we should make sure that we unconditionally send a ping once
+ // we awaken.
+ outstandingPing = false
+ t.kpDormant = true
+ t.kpDormancyCond.Wait()
+ }
+ t.kpDormant = false
+ t.mu.Unlock()
+
+ // We get here either because we were dormant and a new stream was
+ // created which unblocked the Wait() call, or because the
+ // keepalive timer expired. In both cases, we need to send a ping.
+ if !outstandingPing {
if channelz.IsOn() {
atomic.AddInt64(&t.czData.kpCount, 1)
}
- // Send ping.
t.controlBuf.put(p)
+ timeoutLeft = t.kp.Timeout
+ outstandingPing = true
}
-
- // By the time control gets here a ping has been sent one way or the other.
- timer.Reset(t.kp.Timeout)
- select {
- case <-timer.C:
- if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
- timer.Reset(t.kp.Time)
- continue
- }
- infof("transport: closing client transport due to idleness.")
- t.Close()
- return
- case <-t.ctx.Done():
- if !timer.Stop() {
- <-timer.C
- }
- return
- }
+ // The amount of time to sleep here is the minimum of kp.Time and
+ // timeoutLeft. This will ensure that we wait only for kp.Time
+ // before sending out the next ping (for cases where the ping is
+ // acked).
+ sleepDuration := minTime(t.kp.Time, timeoutLeft)
+ timeoutLeft -= sleepDuration
+ timer.Reset(sleepDuration)
case <-t.ctx.Done():
if !timer.Stop() {
<-timer.C
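Reviewer note: the client keepalive loop above replaces the activity flag and awakenKeepalive channel with a lastRead timestamp plus a single timer whose sleep is the minimum of kp.Time and the remaining ping-ack timeout. A rough standalone sketch of that accounting (illustrative only, not the vendored loop):

package main

import (
	"fmt"
	"time"
)

// minTime matches the helper added in the vendored client transport.
func minTime(a, b time.Duration) time.Duration {
	if a < b {
		return a
	}
	return b
}

func main() {
	// Once a ping is outstanding, the goroutine sleeps for the smaller of
	// kp.Time and the remaining ack timeout, decrementing the latter; when
	// it reaches zero with no read activity, the transport is closed.
	kpTime := 30 * time.Second
	timeoutLeft := 20 * time.Second

	sleep := minTime(kpTime, timeoutLeft)
	timeoutLeft -= sleep
	fmt.Println(sleep, timeoutLeft) // 20s sleep, 0s left: next wakeup closes the transport if still unacked
}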
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index 4e26f6a1d..8b04b0392 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -62,11 +62,15 @@ var (
statusRawProto = internal.StatusRawProto.(func(*status.Status) *spb.Status)
)
+// serverConnectionCounter counts the number of connections a server has seen
+// (equal to the number of http2Servers created). Must be accessed atomically.
+var serverConnectionCounter uint64
+
// http2Server implements the ServerTransport interface with HTTP2.
type http2Server struct {
+ lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
ctx context.Context
- ctxDone <-chan struct{} // Cache the context.Done() chan
- cancel context.CancelFunc
+ done chan struct{}
conn net.Conn
loopy *loopyWriter
readerDone chan struct{} // sync point to enable testing.
@@ -84,12 +88,8 @@ type http2Server struct {
controlBuf *controlBuffer
fc *trInFlow
stats stats.Handler
- // Flag to keep track of reading activity on transport.
- // 1 is true and 0 is false.
- activity uint32 // Accessed atomically.
// Keepalive and max-age parameters for the server.
kp keepalive.ServerParameters
-
// Keepalive enforcement policy.
kep keepalive.EnforcementPolicy
// The time instance last ping was received.
@@ -125,6 +125,8 @@ type http2Server struct {
channelzID int64 // channelz unique identification number
czData *channelzData
bufferPool *bufferPool
+
+ connectionID uint64
}
// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
@@ -175,6 +177,12 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
Val: *config.MaxHeaderListSize,
})
}
+ if config.HeaderTableSize != nil {
+ isettings = append(isettings, http2.Setting{
+ ID: http2.SettingHeaderTableSize,
+ Val: *config.HeaderTableSize,
+ })
+ }
if err := framer.fr.WriteSettings(isettings...); err != nil {
return nil, connectionErrorf(false, err, "transport: %v", err)
}
@@ -206,11 +214,10 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
if kep.MinTime == 0 {
kep.MinTime = defaultKeepalivePolicyMinTime
}
- ctx, cancel := context.WithCancel(context.Background())
+ done := make(chan struct{})
t := &http2Server{
- ctx: ctx,
- cancel: cancel,
- ctxDone: ctx.Done(),
+ ctx: context.Background(),
+ done: done,
conn: conn,
remoteAddr: conn.RemoteAddr(),
localAddr: conn.LocalAddr(),
@@ -231,7 +238,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
czData: new(channelzData),
bufferPool: newBufferPool(),
}
- t.controlBuf = newControlBuffer(t.ctxDone)
+ t.controlBuf = newControlBuffer(t.done)
if dynamicWindow {
t.bdpEst = &bdpEstimator{
bdp: initialWindowSize,
@@ -249,6 +256,9 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
if channelz.IsOn() {
t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
}
+
+ t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1)
+
t.framer.writer.Flush()
defer func() {
@@ -273,7 +283,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
if err != nil {
return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err)
}
- atomic.StoreUint32(&t.activity, 1)
+ atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
sf, ok := frame.(*http2.SettingsFrame)
if !ok {
return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame)
@@ -362,12 +372,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
rstCode: http2.ErrCodeRefusedStream,
onWrite: func() {},
})
+ s.cancel()
return false
}
}
t.mu.Lock()
if t.state != reachable {
t.mu.Unlock()
+ s.cancel()
return false
}
if uint32(len(t.activeStreams)) >= t.maxStreams {
@@ -378,12 +390,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
rstCode: http2.ErrCodeRefusedStream,
onWrite: func() {},
})
+ s.cancel()
return false
}
if streamID%2 != 1 || streamID <= t.maxStreamID {
t.mu.Unlock()
// illegal gRPC stream id.
errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
+ s.cancel()
return true
}
t.maxStreamID = streamID
@@ -408,6 +422,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
LocalAddr: t.localAddr,
Compression: s.recvCompress,
WireLength: int(frame.Header().Length),
+ Header: metadata.MD(state.data.mdata).Copy(),
}
t.stats.HandleRPC(s.ctx, inHeader)
}
@@ -441,7 +456,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
for {
t.controlBuf.throttle()
frame, err := t.framer.fr.ReadFrame()
- atomic.StoreUint32(&t.activity, 1)
+ atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
if err != nil {
if se, ok := err.(http2.StreamError); ok {
warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se)
@@ -749,7 +764,7 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
return true
}
-// WriteHeader sends the header metedata md back to the client.
+// WriteHeader sends the header metadata md back to the client.
func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
if s.updateHeaderSent() || s.getState() == streamDone {
return ErrIllegalHeaderWrite
@@ -800,7 +815,9 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error {
if t.stats != nil {
// Note: WireLength is not set in outHeader.
// TODO(mmukhi): Revisit this later, if needed.
- outHeader := &stats.OutHeader{}
+ outHeader := &stats.OutHeader{
+ Header: s.header.Copy(),
+ }
t.stats.HandleRPC(s.Context(), outHeader)
}
return nil
@@ -863,7 +880,9 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
rst := s.getState() == streamActive
t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true)
if t.stats != nil {
- t.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
+ t.stats.HandleRPC(s.Context(), &stats.OutTrailer{
+ Trailer: s.trailer.Copy(),
+ })
}
return nil
}
@@ -885,7 +904,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
// TODO(mmukhi, dfawley): Should the server write also return io.EOF?
s.cancel()
select {
- case <-t.ctx.Done():
+ case <-t.done:
return ErrConnClosing
default:
}
@@ -907,7 +926,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
}
if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
select {
- case <-t.ctx.Done():
+ case <-t.done:
return ErrConnClosing
default:
}
@@ -924,32 +943,35 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
// after an additional duration of keepalive.Timeout.
func (t *http2Server) keepalive() {
p := &ping{}
- var pingSent bool
- maxIdle := time.NewTimer(t.kp.MaxConnectionIdle)
- maxAge := time.NewTimer(t.kp.MaxConnectionAge)
- keepalive := time.NewTimer(t.kp.Time)
- // NOTE: All exit paths of this function should reset their
- // respective timers. A failure to do so will cause the
- // following clean-up to deadlock and eventually leak.
+ // True iff a ping has been sent, and no data has been received since then.
+ outstandingPing := false
+ // Amount of time remaining before which we should receive an ACK for the
+ // last sent ping.
+ kpTimeoutLeft := time.Duration(0)
+ // Records the last value of t.lastRead before we go block on the timer.
+ // This is required to check for read activity since then.
+ prevNano := time.Now().UnixNano()
+ // Initialize the different timers to their default values.
+ idleTimer := time.NewTimer(t.kp.MaxConnectionIdle)
+ ageTimer := time.NewTimer(t.kp.MaxConnectionAge)
+ kpTimer := time.NewTimer(t.kp.Time)
defer func() {
- if !maxIdle.Stop() {
- <-maxIdle.C
- }
- if !maxAge.Stop() {
- <-maxAge.C
- }
- if !keepalive.Stop() {
- <-keepalive.C
- }
+ // We need to drain the underlying channel in these timers after a call
+ // to Stop(), only if we are interested in resetting them. Clearly we
+ // are not interested in resetting them here.
+ idleTimer.Stop()
+ ageTimer.Stop()
+ kpTimer.Stop()
}()
+
for {
select {
- case <-maxIdle.C:
+ case <-idleTimer.C:
t.mu.Lock()
idle := t.idle
if idle.IsZero() { // The connection is non-idle.
t.mu.Unlock()
- maxIdle.Reset(t.kp.MaxConnectionIdle)
+ idleTimer.Reset(t.kp.MaxConnectionIdle)
continue
}
val := t.kp.MaxConnectionIdle - time.Since(idle)
@@ -958,44 +980,52 @@ func (t *http2Server) keepalive() {
// The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
// Gracefully close the connection.
t.drain(http2.ErrCodeNo, []byte{})
- // Resetting the timer so that the clean-up doesn't deadlock.
- maxIdle.Reset(infinity)
return
}
- maxIdle.Reset(val)
- case <-maxAge.C:
+ idleTimer.Reset(val)
+ case <-ageTimer.C:
t.drain(http2.ErrCodeNo, []byte{})
- maxAge.Reset(t.kp.MaxConnectionAgeGrace)
+ ageTimer.Reset(t.kp.MaxConnectionAgeGrace)
select {
- case <-maxAge.C:
+ case <-ageTimer.C:
// Close the connection after grace period.
infof("transport: closing server transport due to maximum connection age.")
t.Close()
- // Resetting the timer so that the clean-up doesn't deadlock.
- maxAge.Reset(infinity)
- case <-t.ctx.Done():
+ case <-t.done:
}
return
- case <-keepalive.C:
- if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
- pingSent = false
- keepalive.Reset(t.kp.Time)
+ case <-kpTimer.C:
+ lastRead := atomic.LoadInt64(&t.lastRead)
+ if lastRead > prevNano {
+ // There has been read activity since the last time we were
+ // here. Setup the timer to fire at kp.Time seconds from
+ // lastRead time and continue.
+ outstandingPing = false
+ kpTimer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano()))
+ prevNano = lastRead
continue
}
- if pingSent {
+ if outstandingPing && kpTimeoutLeft <= 0 {
infof("transport: closing server transport due to idleness.")
t.Close()
- // Resetting the timer so that the clean-up doesn't deadlock.
- keepalive.Reset(infinity)
return
}
- pingSent = true
- if channelz.IsOn() {
- atomic.AddInt64(&t.czData.kpCount, 1)
+ if !outstandingPing {
+ if channelz.IsOn() {
+ atomic.AddInt64(&t.czData.kpCount, 1)
+ }
+ t.controlBuf.put(p)
+ kpTimeoutLeft = t.kp.Timeout
+ outstandingPing = true
}
- t.controlBuf.put(p)
- keepalive.Reset(t.kp.Timeout)
- case <-t.ctx.Done():
+ // The amount of time to sleep here is the minimum of kp.Time and
+ // timeoutLeft. This will ensure that we wait only for kp.Time
+ // before sending out the next ping (for cases where the ping is
+ // acked).
+ sleepDuration := minTime(t.kp.Time, kpTimeoutLeft)
+ kpTimeoutLeft -= sleepDuration
+ kpTimer.Reset(sleepDuration)
+ case <-t.done:
return
}
}
@@ -1015,7 +1045,7 @@ func (t *http2Server) Close() error {
t.activeStreams = nil
t.mu.Unlock()
t.controlBuf.finish()
- t.cancel()
+ close(t.done)
err := t.conn.Close()
if channelz.IsOn() {
channelz.RemoveEntry(t.channelzID)
@@ -1155,7 +1185,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
select {
case <-t.drainChan:
case <-timer.C:
- case <-t.ctx.Done():
+ case <-t.done:
return
}
t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData})
@@ -1205,7 +1235,7 @@ func (t *http2Server) getOutFlowWindow() int64 {
select {
case sz := <-resp:
return int64(sz)
- case <-t.ctxDone:
+ case <-t.done:
return -1
case <-timer.C:
return -2
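Reviewer note: the server transport now broadcasts shutdown by closing a plain done channel instead of cancelling a context, which every select in the transport can observe. A minimal sketch of the pattern (illustrative only):

package main

import (
	"fmt"
	"time"
)

func main() {
	done := make(chan struct{})

	go func() {
		select {
		case <-done:
			fmt.Println("keepalive goroutine exiting")
		case <-time.After(time.Minute):
			fmt.Println("timer fired first")
		}
	}()

	close(done) // the equivalent of the old cancel()
	time.Sleep(10 * time.Millisecond)
}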
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
index 1c1d10670..a30da9eb3 100644
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -73,10 +73,11 @@ type recvMsg struct {
}
// recvBuffer is an unbounded channel of recvMsg structs.
-// Note recvBuffer differs from controlBuffer only in that recvBuffer
-// holds a channel of only recvMsg structs instead of objects implementing "item" interface.
-// recvBuffer is written to much more often than
-// controlBuffer and using strict recvMsg structs helps avoid allocation in "recvBuffer.put"
+//
+// Note: recvBuffer differs from buffer.Unbounded only in the fact that it
+// holds a channel of recvMsg structs instead of objects implementing "item"
+// interface. recvBuffer is written to much more often and using strict recvMsg
+// structs helps avoid allocation in "recvBuffer.put"
type recvBuffer struct {
c chan recvMsg
mu sync.Mutex
@@ -233,6 +234,7 @@ const (
type Stream struct {
id uint32
st ServerTransport // nil for client side Stream
+ ct *http2Client // nil for server side Stream
ctx context.Context // the associated context of the stream
cancel context.CancelFunc // always nil for client side Stream
done chan struct{} // closed at the end of stream to unblock writers. On the client side.
@@ -251,6 +253,10 @@ type Stream struct {
headerChan chan struct{} // closed to indicate the end of header metadata.
headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times.
+ // headerValid indicates whether a valid header was received. Only
+ // meaningful after headerChan is closed (always call waitOnHeader() before
+ // reading its value). Not valid on server side.
+ headerValid bool
// hdrMu protects header and trailer metadata on the server-side.
hdrMu sync.Mutex
@@ -303,34 +309,28 @@ func (s *Stream) getState() streamState {
return streamState(atomic.LoadUint32((*uint32)(&s.state)))
}
-func (s *Stream) waitOnHeader() error {
+func (s *Stream) waitOnHeader() {
if s.headerChan == nil {
// On the server headerChan is always nil since a stream originates
// only after having received headers.
- return nil
+ return
}
select {
case <-s.ctx.Done():
- // We prefer success over failure when reading messages because we delay
- // context error in stream.Read(). To keep behavior consistent, we also
- // prefer success here.
- select {
- case <-s.headerChan:
- return nil
- default:
- }
- return ContextErr(s.ctx.Err())
+ // Close the stream to prevent headers/trailers from changing after
+ // this function returns.
+ s.ct.CloseStream(s, ContextErr(s.ctx.Err()))
+ // headerChan could possibly not be closed yet if closeStream raced
+ // with operateHeaders; wait until it is closed explicitly here.
+ <-s.headerChan
case <-s.headerChan:
- return nil
}
}
// RecvCompress returns the compression algorithm applied to the inbound
// message. It is empty string if there is no compression applied.
func (s *Stream) RecvCompress() string {
- if err := s.waitOnHeader(); err != nil {
- return ""
- }
+ s.waitOnHeader()
return s.recvCompress
}
@@ -351,36 +351,27 @@ func (s *Stream) Done() <-chan struct{} {
// available. It blocks until i) the metadata is ready or ii) there is no header
// metadata or iii) the stream is canceled/expired.
//
-// On server side, it returns the out header after t.WriteHeader is called.
+// On server side, it returns the out header after t.WriteHeader is called. It
+// does not block and must not be called until after WriteHeader.
func (s *Stream) Header() (metadata.MD, error) {
- if s.headerChan == nil && s.header != nil {
+ if s.headerChan == nil {
// On server side, return the header in stream. It will be the out
// header after t.WriteHeader is called.
return s.header.Copy(), nil
}
- err := s.waitOnHeader()
- // Even if the stream is closed, header is returned if available.
- select {
- case <-s.headerChan:
- if s.header == nil {
- return nil, nil
- }
- return s.header.Copy(), nil
- default:
+ s.waitOnHeader()
+ if !s.headerValid {
+ return nil, s.status.Err()
}
- return nil, err
+ return s.header.Copy(), nil
}
// TrailersOnly blocks until a header or trailers-only frame is received and
// then returns true if the stream was trailers-only. If the stream ends
-// before headers are received, returns true, nil. If a context error happens
-// first, returns it as a status error. Client-side only.
-func (s *Stream) TrailersOnly() (bool, error) {
- err := s.waitOnHeader()
- if err != nil {
- return false, err
- }
- return s.noHeaders, nil
+// before headers are received, returns true, nil. Client-side only.
+func (s *Stream) TrailersOnly() bool {
+ s.waitOnHeader()
+ return s.noHeaders
}
// Trailer returns the cached trailer metadata. Note that if it is not called
@@ -534,6 +525,7 @@ type ServerConfig struct {
ReadBufferSize int
ChannelzParentID int64
MaxHeaderListSize *uint32
+ HeaderTableSize *uint32
}
// NewServerTransport creates a ServerTransport with conn or non-nil error
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
index 45baa2ae1..00447894f 100644
--- a/vendor/google.golang.org/grpc/picker_wrapper.go
+++ b/vendor/google.golang.org/grpc/picker_wrapper.go
@@ -20,6 +20,7 @@ package grpc
import (
"context"
+ "fmt"
"io"
"sync"
@@ -31,49 +32,78 @@ import (
"google.golang.org/grpc/status"
)
+// v2PickerWrapper wraps a balancer.Picker while providing the
+// balancer.V2Picker API. It requires a pickerWrapper to generate errors
+// including the latest connectionError. To be deleted when balancer.Picker is
+// updated to the balancer.V2Picker API.
+type v2PickerWrapper struct {
+ picker balancer.Picker
+ connErr *connErr
+}
+
+func (v *v2PickerWrapper) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
+ sc, done, err := v.picker.Pick(info.Ctx, info)
+ if err != nil {
+ if err == balancer.ErrTransientFailure {
+ return balancer.PickResult{}, balancer.TransientFailureError(fmt.Errorf("%v, latest connection error: %v", err, v.connErr.connectionError()))
+ }
+ return balancer.PickResult{}, err
+ }
+ return balancer.PickResult{SubConn: sc, Done: done}, nil
+}
+
// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
// actions and unblock when there's a picker update.
type pickerWrapper struct {
mu sync.Mutex
done bool
blockingCh chan struct{}
- picker balancer.Picker
+ picker balancer.V2Picker
- // The latest connection happened.
- connErrMu sync.Mutex
- connErr error
+ // The latest connection error. TODO: remove when V1 picker is deprecated;
+ // balancer should be responsible for providing the error.
+ *connErr
}
-func newPickerWrapper() *pickerWrapper {
- bp := &pickerWrapper{blockingCh: make(chan struct{})}
- return bp
+type connErr struct {
+ mu sync.Mutex
+ err error
}
-func (bp *pickerWrapper) updateConnectionError(err error) {
- bp.connErrMu.Lock()
- bp.connErr = err
- bp.connErrMu.Unlock()
+func (c *connErr) updateConnectionError(err error) {
+ c.mu.Lock()
+ c.err = err
+ c.mu.Unlock()
}
-func (bp *pickerWrapper) connectionError() error {
- bp.connErrMu.Lock()
- err := bp.connErr
- bp.connErrMu.Unlock()
+func (c *connErr) connectionError() error {
+ c.mu.Lock()
+ err := c.err
+ c.mu.Unlock()
return err
}
+func newPickerWrapper() *pickerWrapper {
+ return &pickerWrapper{blockingCh: make(chan struct{}), connErr: &connErr{}}
+}
+
// updatePicker is called by UpdateBalancerState. It unblocks all blocked picks.
-func (bp *pickerWrapper) updatePicker(p balancer.Picker) {
- bp.mu.Lock()
- if bp.done {
- bp.mu.Unlock()
+func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
+ pw.updatePickerV2(&v2PickerWrapper{picker: p, connErr: pw.connErr})
+}
+
+// updatePickerV2 is called by UpdateBalancerState. It unblocks all blocked picks.
+func (pw *pickerWrapper) updatePickerV2(p balancer.V2Picker) {
+ pw.mu.Lock()
+ if pw.done {
+ pw.mu.Unlock()
return
}
- bp.picker = p
- // bp.blockingCh should never be nil.
- close(bp.blockingCh)
- bp.blockingCh = make(chan struct{})
- bp.mu.Unlock()
+ pw.picker = p
+ // pw.blockingCh should never be nil.
+ close(pw.blockingCh)
+ pw.blockingCh = make(chan struct{})
+ pw.mu.Unlock()
}
func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) {
@@ -100,83 +130,85 @@ func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) f
// - the current picker returns other errors and failfast is false.
// - the subConn returned by the current picker is not READY
// When one of these situations happens, pick blocks until the picker gets updated.
-func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) {
+func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, func(balancer.DoneInfo), error) {
var ch chan struct{}
+ var lastPickErr error
for {
- bp.mu.Lock()
- if bp.done {
- bp.mu.Unlock()
+ pw.mu.Lock()
+ if pw.done {
+ pw.mu.Unlock()
return nil, nil, ErrClientConnClosing
}
- if bp.picker == nil {
- ch = bp.blockingCh
+ if pw.picker == nil {
+ ch = pw.blockingCh
}
- if ch == bp.blockingCh {
+ if ch == pw.blockingCh {
// This could happen when either:
- // - bp.picker is nil (the previous if condition), or
+ // - pw.picker is nil (the previous if condition), or
// - has called pick on the current picker.
- bp.mu.Unlock()
+ pw.mu.Unlock()
select {
case <-ctx.Done():
- if connectionErr := bp.connectionError(); connectionErr != nil {
- switch ctx.Err() {
- case context.DeadlineExceeded:
- return nil, nil, status.Errorf(codes.DeadlineExceeded, "latest connection error: %v", connectionErr)
- case context.Canceled:
- return nil, nil, status.Errorf(codes.Canceled, "latest connection error: %v", connectionErr)
- }
+ var errStr string
+ if lastPickErr != nil {
+ errStr = "latest balancer error: " + lastPickErr.Error()
+ } else if connectionErr := pw.connectionError(); connectionErr != nil {
+ errStr = "latest connection error: " + connectionErr.Error()
+ } else {
+ errStr = ctx.Err().Error()
+ }
+ switch ctx.Err() {
+ case context.DeadlineExceeded:
+ return nil, nil, status.Error(codes.DeadlineExceeded, errStr)
+ case context.Canceled:
+ return nil, nil, status.Error(codes.Canceled, errStr)
}
- return nil, nil, ctx.Err()
case <-ch:
}
continue
}
- ch = bp.blockingCh
- p := bp.picker
- bp.mu.Unlock()
+ ch = pw.blockingCh
+ p := pw.picker
+ pw.mu.Unlock()
- subConn, done, err := p.Pick(ctx, opts)
+ pickResult, err := p.Pick(info)
if err != nil {
- switch err {
- case balancer.ErrNoSubConnAvailable:
+ if err == balancer.ErrNoSubConnAvailable {
continue
- case balancer.ErrTransientFailure:
+ }
+ if tfe, ok := err.(interface{ IsTransientFailure() bool }); ok && tfe.IsTransientFailure() {
if !failfast {
+ lastPickErr = err
continue
}
- return nil, nil, status.Errorf(codes.Unavailable, "%v, latest connection error: %v", err, bp.connectionError())
- case context.DeadlineExceeded:
- return nil, nil, status.Error(codes.DeadlineExceeded, err.Error())
- case context.Canceled:
- return nil, nil, status.Error(codes.Canceled, err.Error())
- default:
- if _, ok := status.FromError(err); ok {
- return nil, nil, err
- }
- // err is some other error.
- return nil, nil, status.Error(codes.Unknown, err.Error())
+ return nil, nil, status.Error(codes.Unavailable, err.Error())
}
+ if _, ok := status.FromError(err); ok {
+ return nil, nil, err
+ }
+ // err is some other error.
+ return nil, nil, status.Error(codes.Unknown, err.Error())
}
- acw, ok := subConn.(*acBalancerWrapper)
+ acw, ok := pickResult.SubConn.(*acBalancerWrapper)
if !ok {
grpclog.Error("subconn returned from pick is not *acBalancerWrapper")
continue
}
if t, ok := acw.getAddrConn().getReadyTransport(); ok {
if channelz.IsOn() {
- return t, doneChannelzWrapper(acw, done), nil
+ return t, doneChannelzWrapper(acw, pickResult.Done), nil
}
- return t, done, nil
+ return t, pickResult.Done, nil
}
- if done != nil {
+ if pickResult.Done != nil {
// Calling done with nil error, no bytes sent and no bytes received.
// DoneInfo with default value works.
- done(balancer.DoneInfo{})
+ pickResult.Done(balancer.DoneInfo{})
}
grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
// If ok == false, ac.state is not READY.
@@ -186,12 +218,12 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.
}
}
-func (bp *pickerWrapper) close() {
- bp.mu.Lock()
- defer bp.mu.Unlock()
- if bp.done {
+func (pw *pickerWrapper) close() {
+ pw.mu.Lock()
+ defer pw.mu.Unlock()
+ if pw.done {
return
}
- bp.done = true
- close(bp.blockingCh)
+ pw.done = true
+ close(pw.blockingCh)
}
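Reviewer note: the rewritten pick() above detects transient failures via a duck-typed IsTransientFailure() method rather than comparing against a sentinel error value. A small sketch of that check (illustrative only; transientErr and isTransient are hypothetical names):

package main

import (
	"errors"
	"fmt"
)

// transientErr stands in for an error produced by balancer.TransientFailureError.
type transientErr struct{ error }

func (transientErr) IsTransientFailure() bool { return true }

// isTransient mirrors the duck-typed check in the picker wrapper: any error
// exposing IsTransientFailure() bool is treated as transient, so the wrapper
// does not need to import a concrete error type.
func isTransient(err error) bool {
	tfe, ok := err.(interface{ IsTransientFailure() bool })
	return ok && tfe.IsTransientFailure()
}

func main() {
	fmt.Println(isTransient(transientErr{errors.New("connection refused")})) // true
	fmt.Println(isTransient(errors.New("plain error")))                      // false
}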
diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go
index ed05b02ed..c43dac9ad 100644
--- a/vendor/google.golang.org/grpc/pickfirst.go
+++ b/vendor/google.golang.org/grpc/pickfirst.go
@@ -19,12 +19,14 @@
package grpc
import (
- "context"
+ "errors"
"google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/codes"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/resolver"
+ "google.golang.org/grpc/status"
)
// PickFirstBalancerName is the name of the pick_first balancer.
@@ -45,35 +47,67 @@ func (*pickfirstBuilder) Name() string {
}
type pickfirstBalancer struct {
- cc balancer.ClientConn
- sc balancer.SubConn
+ state connectivity.State
+ cc balancer.ClientConn
+ sc balancer.SubConn
}
+var _ balancer.V2Balancer = &pickfirstBalancer{} // Assert we implement v2
+
func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
if err != nil {
- if grpclog.V(2) {
- grpclog.Infof("pickfirstBalancer: HandleResolvedAddrs called with error %v", err)
- }
+ b.ResolverError(err)
return
}
+ b.UpdateClientConnState(balancer.ClientConnState{ResolverState: resolver.State{Addresses: addrs}}) // Ignore error
+}
+
+func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+ b.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s})
+}
+
+func (b *pickfirstBalancer) ResolverError(err error) {
+ switch b.state {
+ case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting:
+ // Set a failing picker if we don't have a good picker.
+ b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure,
+ Picker: &picker{err: status.Errorf(codes.Unavailable, "name resolver error: %v", err)}},
+ )
+ }
+ if grpclog.V(2) {
+ grpclog.Infof("pickfirstBalancer: ResolverError called with error %v", err)
+ }
+}
+
+func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) error {
+ if len(cs.ResolverState.Addresses) == 0 {
+ b.ResolverError(errors.New("produced zero addresses"))
+ return balancer.ErrBadResolverState
+ }
if b.sc == nil {
- b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{})
+ var err error
+ b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{})
if err != nil {
- //TODO(yuxuanli): why not change the cc state to Idle?
if grpclog.V(2) {
grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
}
- return
+ b.state = connectivity.TransientFailure
+ b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure,
+ Picker: &picker{err: status.Errorf(codes.Unavailable, "error creating connection: %v", err)}},
+ )
+ return balancer.ErrBadResolverState
}
- b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc})
+ b.state = connectivity.Idle
+ b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}})
b.sc.Connect()
} else {
- b.sc.UpdateAddresses(addrs)
+ b.sc.UpdateAddresses(cs.ResolverState.Addresses)
b.sc.Connect()
}
+ return nil
}
-func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) {
if grpclog.V(2) {
grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s)
}
@@ -83,18 +117,28 @@ func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s conn
}
return
}
- if s == connectivity.Shutdown {
+ b.state = s.ConnectivityState
+ if s.ConnectivityState == connectivity.Shutdown {
b.sc = nil
return
}
- switch s {
+ switch s.ConnectivityState {
case connectivity.Ready, connectivity.Idle:
- b.cc.UpdateBalancerState(s, &picker{sc: sc})
+ b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}})
case connectivity.Connecting:
- b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrNoSubConnAvailable})
+ b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}})
case connectivity.TransientFailure:
- b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrTransientFailure})
+ err := balancer.ErrTransientFailure
+ // TODO: this can be unconditional after the V1 API is removed, as
+ // SubConnState will always contain a connection error.
+ if s.ConnectionError != nil {
+ err = balancer.TransientFailureError(s.ConnectionError)
+ }
+ b.cc.UpdateState(balancer.State{
+ ConnectivityState: s.ConnectivityState,
+ Picker: &picker{err: err},
+ })
}
}
@@ -102,15 +146,12 @@ func (b *pickfirstBalancer) Close() {
}
type picker struct {
- err error
- sc balancer.SubConn
+ result balancer.PickResult
+ err error
}
-func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
- if p.err != nil {
- return nil, nil, p.err
- }
- return p.sc, nil, nil
+func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
+ return p.result, p.err
}
func init() {
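The v2 balancer API adopted above reduces the picker to a single Pick(balancer.PickInfo) (balancer.PickResult, error) method. A minimal stand-alone sketch of that contract, assuming google.golang.org/grpc at roughly this vendored version; constPicker is a made-up name and not part of this change:

package main

import (
	"fmt"

	"google.golang.org/grpc/balancer"
)

// constPicker always returns the same pre-built PickResult or error, mirroring
// the pickfirst picker above, which stores both and lets the balancer swap
// pickers as the SubConn state changes.
type constPicker struct {
	result balancer.PickResult
	err    error
}

func (p *constPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
	// info.FullMethodName (and info.Ctx) are available to method-aware
	// policies; this sketch ignores them.
	return p.result, p.err
}

func main() {
	// While no SubConn is ready, a balancer typically publishes a picker that
	// returns ErrNoSubConnAvailable so the ClientConn queues RPCs.
	p := &constPicker{err: balancer.ErrNoSubConnAvailable}
	if _, err := p.Pick(balancer.PickInfo{FullMethodName: "/pkg.Service/Method"}); err != nil {
		fmt.Println("pick failed as expected:", err)
	}
}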
diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go
index e83da346a..fe14b2fb9 100644
--- a/vendor/google.golang.org/grpc/resolver/resolver.go
+++ b/vendor/google.golang.org/grpc/resolver/resolver.go
@@ -21,6 +21,11 @@
package resolver
import (
+ "context"
+ "net"
+
+ "google.golang.org/grpc/attributes"
+ "google.golang.org/grpc/credentials"
"google.golang.org/grpc/serviceconfig"
)
@@ -69,12 +74,18 @@ func GetDefaultScheme() string {
}
// AddressType indicates the address type returned by name resolution.
+//
+// Deprecated: use Attributes in Address instead.
type AddressType uint8
const (
// Backend indicates the address is for a backend server.
+ //
+ // Deprecated: use Attributes in Address instead.
Backend AddressType = iota
// GRPCLB indicates the address is for a grpclb load balancer.
+ //
+ // Deprecated: use Attributes in Address instead.
GRPCLB
)
@@ -83,33 +94,75 @@ const (
type Address struct {
// Addr is the server address on which a connection will be established.
Addr string
- // Type is the type of this address.
- Type AddressType
+
// ServerName is the name of this address.
+ // If non-empty, the ServerName is used as the transport certification authority for
+ // the address, instead of the hostname from the Dial target string. In most cases,
+ // this should not be set.
//
- // e.g. if Type is GRPCLB, ServerName should be the name of the remote load
+ // If Type is GRPCLB, ServerName should be the name of the remote load
// balancer, not the name of the backend.
+ //
+ // WARNING: ServerName must only be populated with trusted values. It
+ // is insecure to populate it with data from untrusted inputs since untrusted
+ // values could be used to bypass the authority checks performed by TLS.
ServerName string
+
+ // Attributes contains arbitrary data about this address intended for
+ // consumption by the load balancing policy.
+ Attributes *attributes.Attributes
+
+ // Type is the type of this address.
+ //
+ // Deprecated: use Attributes instead.
+ Type AddressType
+
// Metadata is the information associated with Addr, which may be used
// to make load balancing decision.
+ //
+ // Deprecated: use Attributes instead.
Metadata interface{}
}
-// BuildOption includes additional information for the builder to create
+// BuildOptions includes additional information for the builder to create
// the resolver.
-type BuildOption struct {
- // DisableServiceConfig indicates whether resolver should fetch service config data.
+type BuildOptions struct {
+ // DisableServiceConfig indicates whether a resolver implementation should
+ // fetch service config data.
DisableServiceConfig bool
+ // DialCreds is the transport credentials used by the ClientConn for
+ // communicating with the target gRPC service (set via
+ // WithTransportCredentials). In cases where a name resolution service
+ // requires the same credentials, the resolver may use this field. In most
+ // cases though, it is not appropriate, and this field may be ignored.
+ DialCreds credentials.TransportCredentials
+ // CredsBundle is the credentials bundle used by the ClientConn for
+ // communicating with the target gRPC service (set via
+ // WithCredentialsBundle). In cases where a name resolution service
+ // requires the same credentials, the resolver may use this field. In most
+ // cases though, it is not appropriate, and this field may be ignored.
+ CredsBundle credentials.Bundle
+ // Dialer is the custom dialer used by the ClientConn for dialling the
+ // target gRPC service (set via WithDialer). In cases where a name
+ // resolution service requires the same dialer, the resolver may use this
+ // field. In most cases though, it is not appropriate, and this field may
+ // be ignored.
+ Dialer func(context.Context, string) (net.Conn, error)
}
// State contains the current Resolver state relevant to the ClientConn.
type State struct {
- Addresses []Address // Resolved addresses for the target
- // ServiceConfig is the parsed service config; obtained from
- // serviceconfig.Parse.
- ServiceConfig serviceconfig.Config
+ // Addresses is the latest set of resolved addresses for the target.
+ Addresses []Address
+
+ // ServiceConfig contains the result from parsing the latest service
+ // config. If it is nil, it indicates no service config is present or the
+ // resolver does not provide service configs.
+ ServiceConfig *serviceconfig.ParseResult
- // TODO: add Err error
+ // Attributes contains arbitrary data about the resolver intended for
+ // consumption by the load balancing policy.
+ Attributes *attributes.Attributes
}
// ClientConn contains the callbacks for resolver to notify any updates
@@ -122,6 +175,10 @@ type State struct {
type ClientConn interface {
// UpdateState updates the state of the ClientConn appropriately.
UpdateState(State)
+ // ReportError notifies the ClientConn that the Resolver encountered an
+ // error. The ClientConn will notify the load balancer and begin calling
+ // ResolveNow on the Resolver with exponential backoff.
+ ReportError(error)
// NewAddress is called by resolver to notify ClientConn a new list
// of resolved addresses.
// The address list should be the complete list of resolved addresses.
@@ -133,6 +190,9 @@ type ClientConn interface {
//
// Deprecated: Use UpdateState instead.
NewServiceConfig(serviceConfig string)
+ // ParseServiceConfig parses the provided service config and returns an
+ // object that provides the parsed config.
+ ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult
}
// Target represents a target for gRPC, as specified in:
@@ -164,14 +224,14 @@ type Builder interface {
//
// gRPC dial calls Build synchronously, and fails if the returned error is
// not nil.
- Build(target Target, cc ClientConn, opts BuildOption) (Resolver, error)
+ Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error)
// Scheme returns the scheme supported by this resolver.
// Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md.
Scheme() string
}
-// ResolveNowOption includes additional information for ResolveNow.
-type ResolveNowOption struct{}
+// ResolveNowOptions includes additional information for ResolveNow.
+type ResolveNowOptions struct{}
// Resolver watches for the updates on the specified target.
// Updates include address updates and service config updates.
@@ -180,7 +240,7 @@ type Resolver interface {
// again. It's just a hint, resolver can ignore this if it's not necessary.
//
// It could be called multiple times concurrently.
- ResolveNow(ResolveNowOption)
+ ResolveNow(ResolveNowOptions)
// Close closes the resolver.
Close()
}
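Taken together, the new Builder/Resolver/ClientConn surfaces (BuildOptions, ResolveNowOptions, UpdateState, ReportError) can be exercised by a toy resolver. A hedged sketch only; staticBuilder, staticResolver, and the "static" scheme are invented for illustration:

package main

import (
	"errors"

	"google.golang.org/grpc/resolver"
)

// staticBuilder/staticResolver are illustrative only: a resolver that pushes a
// fixed address list and uses ReportError when it has nothing to report.
type staticBuilder struct {
	addrs []string
}

func (b *staticBuilder) Scheme() string { return "static" }

func (b *staticBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
	r := &staticResolver{cc: cc, addrs: b.addrs}
	r.ResolveNow(resolver.ResolveNowOptions{}) // publish the initial state
	return r, nil
}

type staticResolver struct {
	cc    resolver.ClientConn
	addrs []string
}

func (r *staticResolver) ResolveNow(resolver.ResolveNowOptions) {
	if len(r.addrs) == 0 {
		// ReportError makes the ClientConn notify the balancer and re-resolve
		// with backoff, per the ClientConn interface above.
		r.cc.ReportError(errors.New("static resolver: no addresses configured"))
		return
	}
	state := resolver.State{}
	for _, a := range r.addrs {
		state.Addresses = append(state.Addresses, resolver.Address{Addr: a})
	}
	r.cc.UpdateState(state)
}

func (r *staticResolver) Close() {}

func main() {
	// After registration, targets of the form "static:///anything" use it.
	resolver.Register(&staticBuilder{addrs: []string{"127.0.0.1:50051"}})
}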
diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
index 6934905b0..3eaf724cd 100644
--- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
+++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
@@ -21,22 +21,29 @@ package grpc
import (
"fmt"
"strings"
- "sync/atomic"
+ "sync"
+ "time"
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/channelz"
+ "google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/resolver"
+ "google.golang.org/grpc/serviceconfig"
)
// ccResolverWrapper is a wrapper on top of cc for resolvers.
-// It implements resolver.ClientConnection interface.
+// It implements resolver.ClientConn interface.
type ccResolverWrapper struct {
- cc *ClientConn
- resolver resolver.Resolver
- addrCh chan []resolver.Address
- scCh chan string
- done uint32 // accessed atomically; set to 1 when closed.
- curState resolver.State
+ cc *ClientConn
+ resolverMu sync.Mutex
+ resolver resolver.Resolver
+ done *grpcsync.Event
+ curState resolver.State
+
+ pollingMu sync.Mutex
+ polling chan struct{}
}
// split2 returns the values from strings.SplitN(s, sep, 2).
@@ -67,60 +74,126 @@ func parseTarget(target string) (ret resolver.Target) {
return ret
}
-// newCCResolverWrapper parses cc.target for scheme and gets the resolver
-// builder for this scheme and builds the resolver. The monitoring goroutine
-// for it is not started yet and can be created by calling start().
-//
-// If withResolverBuilder dial option is set, the specified resolver will be
-// used instead.
-func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) {
- rb := cc.dopts.resolverBuilder
- if rb == nil {
- return nil, fmt.Errorf("could not get resolver for scheme: %q", cc.parsedTarget.Scheme)
+// newCCResolverWrapper uses the resolver.Builder to build a Resolver and
+// returns a ccResolverWrapper object which wraps the newly built resolver.
+func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) {
+ ccr := &ccResolverWrapper{
+ cc: cc,
+ done: grpcsync.NewEvent(),
}
- ccr := &ccResolverWrapper{
- cc: cc,
- addrCh: make(chan []resolver.Address, 1),
- scCh: make(chan string, 1),
+ var credsClone credentials.TransportCredentials
+ if creds := cc.dopts.copts.TransportCredentials; creds != nil {
+ credsClone = creds.Clone()
+ }
+ rbo := resolver.BuildOptions{
+ DisableServiceConfig: cc.dopts.disableServiceConfig,
+ DialCreds: credsClone,
+ CredsBundle: cc.dopts.copts.CredsBundle,
+ Dialer: cc.dopts.copts.Dialer,
}
var err error
- ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, resolver.BuildOption{DisableServiceConfig: cc.dopts.disableServiceConfig})
+ // We need to hold the lock here while we assign to the ccr.resolver field
+ // to guard against a data race caused by the following code path,
+ // rb.Build-->ccr.ReportError-->ccr.poll-->ccr.resolveNow, would end up
+ // accessing ccr.resolver which is being assigned here.
+ ccr.resolverMu.Lock()
+ defer ccr.resolverMu.Unlock()
+ ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo)
if err != nil {
return nil, err
}
return ccr, nil
}
-func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOption) {
- ccr.resolver.ResolveNow(o)
+func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) {
+ ccr.resolverMu.Lock()
+ if !ccr.done.HasFired() {
+ ccr.resolver.ResolveNow(o)
+ }
+ ccr.resolverMu.Unlock()
}
func (ccr *ccResolverWrapper) close() {
+ ccr.resolverMu.Lock()
ccr.resolver.Close()
- atomic.StoreUint32(&ccr.done, 1)
+ ccr.done.Fire()
+ ccr.resolverMu.Unlock()
}
-func (ccr *ccResolverWrapper) isDone() bool {
- return atomic.LoadUint32(&ccr.done) == 1
+// poll begins or ends asynchronous polling of the resolver based on whether
+// err is ErrBadResolverState.
+func (ccr *ccResolverWrapper) poll(err error) {
+ ccr.pollingMu.Lock()
+ defer ccr.pollingMu.Unlock()
+ if err != balancer.ErrBadResolverState {
+ // stop polling
+ if ccr.polling != nil {
+ close(ccr.polling)
+ ccr.polling = nil
+ }
+ return
+ }
+ if ccr.polling != nil {
+ // already polling
+ return
+ }
+ p := make(chan struct{})
+ ccr.polling = p
+ go func() {
+ for i := 0; ; i++ {
+ ccr.resolveNow(resolver.ResolveNowOptions{})
+ t := time.NewTimer(ccr.cc.dopts.resolveNowBackoff(i))
+ select {
+ case <-p:
+ t.Stop()
+ return
+ case <-ccr.done.Done():
+ // Resolver has been closed.
+ t.Stop()
+ return
+ case <-t.C:
+ select {
+ case <-p:
+ return
+ default:
+ }
+ // Timer expired; re-resolve.
+ }
+ }
+ }()
}
func (ccr *ccResolverWrapper) UpdateState(s resolver.State) {
- if ccr.isDone() {
+ if ccr.done.HasFired() {
return
}
grpclog.Infof("ccResolverWrapper: sending update to cc: %v", s)
if channelz.IsOn() {
ccr.addChannelzTraceEvent(s)
}
- ccr.cc.updateResolverState(s)
ccr.curState = s
+ ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil))
+}
+
+func (ccr *ccResolverWrapper) ReportError(err error) {
+ if ccr.done.HasFired() {
+ return
+ }
+ grpclog.Warningf("ccResolverWrapper: reporting error to cc: %v", err)
+ if channelz.IsOn() {
+ channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{
+ Desc: fmt.Sprintf("Resolver reported error: %v", err),
+ Severity: channelz.CtWarning,
+ })
+ }
+ ccr.poll(ccr.cc.updateResolverState(resolver.State{}, err))
}
// NewAddress is called by the resolver implementation to send addresses to gRPC.
func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
- if ccr.isDone() {
+ if ccr.done.HasFired() {
return
}
grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs)
@@ -128,31 +201,53 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig})
}
ccr.curState.Addresses = addrs
- ccr.cc.updateResolverState(ccr.curState)
+ ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil))
}
// NewServiceConfig is called by the resolver implementation to send service
// configs to gRPC.
func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
- if ccr.isDone() {
+ if ccr.done.HasFired() {
return
}
grpclog.Infof("ccResolverWrapper: got new service config: %v", sc)
- c, err := parseServiceConfig(sc)
- if err != nil {
+ if ccr.cc.dopts.disableServiceConfig {
+ grpclog.Infof("Service config lookups disabled; ignoring config")
+ return
+ }
+ scpr := parseServiceConfig(sc)
+ if scpr.Err != nil {
+ grpclog.Warningf("ccResolverWrapper: error parsing service config: %v", scpr.Err)
+ if channelz.IsOn() {
+ channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{
+ Desc: fmt.Sprintf("Error parsing service config: %v", scpr.Err),
+ Severity: channelz.CtWarning,
+ })
+ }
+ ccr.poll(balancer.ErrBadResolverState)
return
}
if channelz.IsOn() {
- ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: c})
+ ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr})
}
- ccr.curState.ServiceConfig = c
- ccr.cc.updateResolverState(ccr.curState)
+ ccr.curState.ServiceConfig = scpr
+ ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil))
+}
+
+func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult {
+ return parseServiceConfig(scJSON)
}
func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
var updates []string
- oldSC, oldOK := ccr.curState.ServiceConfig.(*ServiceConfig)
- newSC, newOK := s.ServiceConfig.(*ServiceConfig)
+ var oldSC, newSC *ServiceConfig
+ var oldOK, newOK bool
+ if ccr.curState.ServiceConfig != nil {
+ oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig)
+ }
+ if s.ServiceConfig != nil {
+ newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig)
+ }
if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) {
updates = append(updates, "service config updated")
}
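The poll loop above follows a common stop-or-retry shape: run the action, wait for a backoff interval, and exit promptly when either the polling channel or the done event fires. A generic standard-library sketch of that pattern; pollUntilStopped and its backoff function are illustrative, not gRPC API:

package main

import (
	"fmt"
	"time"
)

// pollUntilStopped mirrors the loop in ccResolverWrapper.poll: run fn, sleep
// for backoff(i), and bail out as soon as stop is closed, including when the
// close races with the timer firing.
func pollUntilStopped(fn func(), backoff func(retries int) time.Duration, stop <-chan struct{}) {
	for i := 0; ; i++ {
		fn()
		t := time.NewTimer(backoff(i))
		select {
		case <-stop:
			t.Stop()
			return
		case <-t.C:
			// Timer fired; check stop once more so a close that raced with
			// the timer still wins before the next attempt.
			select {
			case <-stop:
				return
			default:
			}
		}
	}
}

func main() {
	stop := make(chan struct{})
	go pollUntilStopped(
		func() { fmt.Println("resolve") },
		func(i int) time.Duration { return time.Duration(i+1) * 10 * time.Millisecond },
		stop,
	)
	time.Sleep(50 * time.Millisecond)
	close(stop)
	time.Sleep(10 * time.Millisecond)
}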
diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
index 088c3f1b2..d3a4adc5e 100644
--- a/vendor/google.golang.org/grpc/rpc_util.go
+++ b/vendor/google.golang.org/grpc/rpc_util.go
@@ -648,35 +648,58 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei
return nil, st.Err()
}
+ var size int
if pf == compressionMade {
// To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor,
// use this decompressor as the default.
if dc != nil {
d, err = dc.Do(bytes.NewReader(d))
- if err != nil {
- return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
- }
+ size = len(d)
} else {
- dcReader, err := compressor.Decompress(bytes.NewReader(d))
- if err != nil {
- return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
- }
- // Read from LimitReader with limit max+1. So if the underlying
- // reader is over limit, the result will be bigger than max.
- d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
- if err != nil {
- return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
- }
+ d, size, err = decompress(compressor, d, maxReceiveMessageSize)
}
+ if err != nil {
+ return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
+ }
+ } else {
+ size = len(d)
}
- if len(d) > maxReceiveMessageSize {
+ if size > maxReceiveMessageSize {
// TODO: Revisit the error code. Currently keep it consistent with java
// implementation.
- return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize)
+ return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", size, maxReceiveMessageSize)
}
return d, nil
}
+// decompress processes d using compressor, returning the decompressed data and its size.
+// If the decompressed size is known to exceed maxReceiveMessageSize, only the size is returned.
+func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) {
+ dcReader, err := compressor.Decompress(bytes.NewReader(d))
+ if err != nil {
+ return nil, 0, err
+ }
+ if sizer, ok := compressor.(interface {
+ DecompressedSize(compressedBytes []byte) int
+ }); ok {
+ if size := sizer.DecompressedSize(d); size >= 0 {
+ if size > maxReceiveMessageSize {
+ return nil, size, nil
+ }
+ // size is used as an estimate to size the buffer, but we
+ // will read more data if available.
+ // +MinRead so ReadFrom will not reallocate if size is correct.
+ buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead))
+ bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
+ return buf.Bytes(), int(bytesRead), err
+ }
+ }
+ // Read from LimitReader with limit max+1. So if the underlying
+ // reader is over limit, the result will be bigger than max.
+ d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
+ return d, len(d), err
+}
+
// For the two compressor parameters, both should not be set, but if they are,
// dc takes precedence over compressor.
// TODO(dfawley): wrap the old compressor/decompressor using the new API?
@@ -848,7 +871,7 @@ type channelzData struct {
// The SupportPackageIsVersion variables are referenced from generated protocol
// buffer files to ensure compatibility with the gRPC version used. The latest
-// support package version is 5.
+// support package version is 6.
//
// Older versions are kept for compatibility. They may be removed if
// compatibility cannot be maintained.
@@ -858,6 +881,7 @@ const (
SupportPackageIsVersion3 = true
SupportPackageIsVersion4 = true
SupportPackageIsVersion5 = true
+ SupportPackageIsVersion6 = true
)
const grpcUA = "grpc-go/" + Version
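The new decompress helper probes the compressor for an optional DecompressedSize method so it can pre-size its buffer and reject oversized messages before reading them. A compressor can supply that hint cheaply; for gzip, the trailer records the uncompressed length (mod 2^32) in its last four bytes. A hedged sketch of such a wrapper (sizedGzip is a made-up name; the Name/Compress/Decompress shape of grpc's encoding.Compressor interface is assumed):

package main

import (
	"bytes"
	"compress/gzip"
	"encoding/binary"
	"io"
)

// sizedGzip is an illustrative compressor wrapper whose DecompressedSize lets
// callers such as decompress() above pre-size buffers and fail fast on
// oversized payloads.
type sizedGzip struct{}

func (sizedGzip) Name() string { return "gzip" }

func (sizedGzip) Compress(w io.Writer) (io.WriteCloser, error) {
	return gzip.NewWriter(w), nil
}

func (sizedGzip) Decompress(r io.Reader) (io.Reader, error) {
	return gzip.NewReader(r)
}

// DecompressedSize reports the uncompressed size recorded in the gzip trailer
// (ISIZE: last 4 bytes, little-endian, size mod 2^32), or -1 if unknown.
func (sizedGzip) DecompressedSize(compressed []byte) int {
	if len(compressed) < 4 {
		return -1
	}
	return int(binary.LittleEndian.Uint32(compressed[len(compressed)-4:]))
}

func main() {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write(bytes.Repeat([]byte("x"), 1024))
	zw.Close()
	if sz := (sizedGzip{}).DecompressedSize(buf.Bytes()); sz != 1024 {
		panic("unexpected size hint")
	}
}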
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index f064b73e5..0d75cb109 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -130,6 +130,7 @@ type serverOptions struct {
readBufferSize int
connectionTimeout time.Duration
maxHeaderListSize *uint32
+ headerTableSize *uint32
}
var defaultServerOptions = serverOptions{
@@ -343,8 +344,8 @@ func StatsHandler(h stats.Handler) ServerOption {
// unknown service handler. The provided method is a bidi-streaming RPC service
// handler that will be invoked instead of returning the "unimplemented" gRPC
// error whenever a request is received for an unregistered service or method.
-// The handling function has full access to the Context of the request and the
-// stream, and the invocation bypasses interceptors.
+// The handling function and stream interceptor (if set) have full access to
+// the ServerStream, including its Context.
func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
o.unknownStreamDesc = &StreamDesc{
@@ -377,6 +378,16 @@ func MaxHeaderListSize(s uint32) ServerOption {
})
}
+// HeaderTableSize returns a ServerOption that sets the size of the dynamic
+// header table for streams.
+//
+// This API is EXPERIMENTAL.
+func HeaderTableSize(s uint32) ServerOption {
+ return newFuncServerOption(func(o *serverOptions) {
+ o.headerTableSize = &s
+ })
+}
+
// NewServer creates a gRPC server which has no service registered and has not
// started to accept requests yet.
func NewServer(opt ...ServerOption) *Server {
@@ -686,6 +697,7 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr
ReadBufferSize: s.opts.readBufferSize,
ChannelzParentID: s.channelzID,
MaxHeaderListSize: s.opts.maxHeaderListSize,
+ HeaderTableSize: s.opts.headerTableSize,
}
st, err := transport.NewServerTransport("http2", c, config)
if err != nil {
@@ -853,41 +865,58 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str
}
func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) {
- if channelz.IsOn() {
- s.incrCallsStarted()
- defer func() {
- if err != nil && err != io.EOF {
- s.incrCallsFailed()
- } else {
- s.incrCallsSucceeded()
- }
- }()
- }
sh := s.opts.statsHandler
- if sh != nil {
- beginTime := time.Now()
- begin := &stats.Begin{
- BeginTime: beginTime,
+ if sh != nil || trInfo != nil || channelz.IsOn() {
+ if channelz.IsOn() {
+ s.incrCallsStarted()
}
- sh.HandleRPC(stream.Context(), begin)
- defer func() {
- end := &stats.End{
+ var statsBegin *stats.Begin
+ if sh != nil {
+ beginTime := time.Now()
+ statsBegin = &stats.Begin{
BeginTime: beginTime,
- EndTime: time.Now(),
- }
- if err != nil && err != io.EOF {
- end.Error = toRPCErr(err)
}
- sh.HandleRPC(stream.Context(), end)
- }()
- }
- if trInfo != nil {
- defer trInfo.tr.Finish()
- trInfo.tr.LazyLog(&trInfo.firstLine, false)
+ sh.HandleRPC(stream.Context(), statsBegin)
+ }
+ if trInfo != nil {
+ trInfo.tr.LazyLog(&trInfo.firstLine, false)
+ }
+ // The deferred error handling for tracing, stats handler and channelz are
+ // combined into one function to reduce stack usage -- a defer takes ~56-64
+ // bytes on the stack, so overflowing the stack will require a stack
+ // re-allocation, which is expensive.
+ //
+ // To maintain behavior similar to separate deferred statements, statements
+ // should be executed in the reverse order. That is, tracing first, stats
+ // handler second, and channelz last. Note that panics *within* defers will
+ // lead to different behavior, but that's an acceptable compromise; that
+ // would be undefined behavior territory anyway.
defer func() {
- if err != nil && err != io.EOF {
- trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
- trInfo.tr.SetError()
+ if trInfo != nil {
+ if err != nil && err != io.EOF {
+ trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+ trInfo.tr.SetError()
+ }
+ trInfo.tr.Finish()
+ }
+
+ if sh != nil {
+ end := &stats.End{
+ BeginTime: statsBegin.BeginTime,
+ EndTime: time.Now(),
+ }
+ if err != nil && err != io.EOF {
+ end.Error = toRPCErr(err)
+ }
+ sh.HandleRPC(stream.Context(), end)
+ }
+
+ if channelz.IsOn() {
+ if err != nil && err != io.EOF {
+ s.incrCallsFailed()
+ } else {
+ s.incrCallsSucceeded()
+ }
}
}()
}
@@ -1087,31 +1116,15 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
if channelz.IsOn() {
s.incrCallsStarted()
- defer func() {
- if err != nil && err != io.EOF {
- s.incrCallsFailed()
- } else {
- s.incrCallsSucceeded()
- }
- }()
}
sh := s.opts.statsHandler
+ var statsBegin *stats.Begin
if sh != nil {
beginTime := time.Now()
- begin := &stats.Begin{
+ statsBegin = &stats.Begin{
BeginTime: beginTime,
}
- sh.HandleRPC(stream.Context(), begin)
- defer func() {
- end := &stats.End{
- BeginTime: beginTime,
- EndTime: time.Now(),
- }
- if err != nil && err != io.EOF {
- end.Error = toRPCErr(err)
- }
- sh.HandleRPC(stream.Context(), end)
- }()
+ sh.HandleRPC(stream.Context(), statsBegin)
}
ctx := NewContextWithServerTransportStream(stream.Context(), stream)
ss := &serverStream{
@@ -1126,6 +1139,41 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
statsHandler: sh,
}
+ if sh != nil || trInfo != nil || channelz.IsOn() {
+ // See comment in processUnaryRPC on defers.
+ defer func() {
+ if trInfo != nil {
+ ss.mu.Lock()
+ if err != nil && err != io.EOF {
+ ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+ ss.trInfo.tr.SetError()
+ }
+ ss.trInfo.tr.Finish()
+ ss.trInfo.tr = nil
+ ss.mu.Unlock()
+ }
+
+ if sh != nil {
+ end := &stats.End{
+ BeginTime: statsBegin.BeginTime,
+ EndTime: time.Now(),
+ }
+ if err != nil && err != io.EOF {
+ end.Error = toRPCErr(err)
+ }
+ sh.HandleRPC(stream.Context(), end)
+ }
+
+ if channelz.IsOn() {
+ if err != nil && err != io.EOF {
+ s.incrCallsFailed()
+ } else {
+ s.incrCallsSucceeded()
+ }
+ }
+ }()
+ }
+
ss.binlog = binarylog.GetMethodLogger(stream.Method())
if ss.binlog != nil {
md, _ := metadata.FromIncomingContext(ctx)
@@ -1179,16 +1227,6 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
if trInfo != nil {
trInfo.tr.LazyLog(&trInfo.firstLine, false)
- defer func() {
- ss.mu.Lock()
- if err != nil && err != io.EOF {
- ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
- ss.trInfo.tr.SetError()
- }
- ss.trInfo.tr.Finish()
- ss.trInfo.tr = nil
- ss.mu.Unlock()
- }()
}
var appErr error
var server interface{}
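The experimental HeaderTableSize option added above slots in next to existing server knobs such as MaxHeaderListSize. A short usage sketch; the listen address and sizes are arbitrary:

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:50051")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	// HeaderTableSize bounds the HTTP/2 dynamic (HPACK) header table the
	// server advertises; MaxHeaderListSize bounds the header lists it accepts.
	srv := grpc.NewServer(
		grpc.HeaderTableSize(4096),
		grpc.MaxHeaderListSize(1<<20),
	)
	if err := srv.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}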
diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go
index d0787f1e2..5a80a575a 100644
--- a/vendor/google.golang.org/grpc/service_config.go
+++ b/vendor/google.golang.org/grpc/service_config.go
@@ -136,9 +136,9 @@ type retryPolicy struct {
maxAttempts int
// Exponential backoff parameters. The initial retry attempt will occur at
- // random(0, initialBackoffMS). In general, the nth attempt will occur at
+ // random(0, initialBackoff). In general, the nth attempt will occur at
// random(0,
- // min(initialBackoffMS*backoffMultiplier**(n-1), maxBackoffMS)).
+ // min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)).
//
// These fields are required and must be greater than zero.
initialBackoff time.Duration
@@ -261,20 +261,17 @@ type jsonSC struct {
}
func init() {
- internal.ParseServiceConfig = func(sc string) (interface{}, error) {
- return parseServiceConfig(sc)
- }
+ internal.ParseServiceConfigForTesting = parseServiceConfig
}
-
-func parseServiceConfig(js string) (*ServiceConfig, error) {
+func parseServiceConfig(js string) *serviceconfig.ParseResult {
if len(js) == 0 {
- return nil, fmt.Errorf("no JSON service config provided")
+ return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")}
}
var rsc jsonSC
err := json.Unmarshal([]byte(js), &rsc)
if err != nil {
grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
- return nil, err
+ return &serviceconfig.ParseResult{Err: err}
}
sc := ServiceConfig{
LB: rsc.LoadBalancingPolicy,
@@ -288,7 +285,7 @@ func parseServiceConfig(js string) (*ServiceConfig, error) {
if len(lbcfg) != 1 {
err := fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg)
grpclog.Warningf(err.Error())
- return nil, err
+ return &serviceconfig.ParseResult{Err: err}
}
var name string
var jsonCfg json.RawMessage
@@ -303,17 +300,25 @@ func parseServiceConfig(js string) (*ServiceConfig, error) {
var err error
sc.lbConfig.cfg, err = parser.ParseConfig(jsonCfg)
if err != nil {
- return nil, fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)
+ return &serviceconfig.ParseResult{Err: fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)}
}
} else if string(jsonCfg) != "{}" {
grpclog.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg))
}
break
}
+ if sc.lbConfig == nil {
+ // We had a loadBalancingConfig field but did not encounter a
+ // supported policy. The config is considered invalid in this
+ // case.
+ err := fmt.Errorf("invalid loadBalancingConfig: no supported policies found")
+ grpclog.Warningf(err.Error())
+ return &serviceconfig.ParseResult{Err: err}
+ }
}
if rsc.MethodConfig == nil {
- return &sc, nil
+ return &serviceconfig.ParseResult{Config: &sc}
}
for _, m := range *rsc.MethodConfig {
if m.Name == nil {
@@ -322,7 +327,7 @@ func parseServiceConfig(js string) (*ServiceConfig, error) {
d, err := parseDuration(m.Timeout)
if err != nil {
grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
- return nil, err
+ return &serviceconfig.ParseResult{Err: err}
}
mc := MethodConfig{
@@ -331,7 +336,7 @@ func parseServiceConfig(js string) (*ServiceConfig, error) {
}
if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
- return nil, err
+ return &serviceconfig.ParseResult{Err: err}
}
if m.MaxRequestMessageBytes != nil {
if *m.MaxRequestMessageBytes > int64(maxInt) {
@@ -356,13 +361,13 @@ func parseServiceConfig(js string) (*ServiceConfig, error) {
if sc.retryThrottling != nil {
if mt := sc.retryThrottling.MaxTokens; mt <= 0 || mt > 1000 {
- return nil, fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt)
+ return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt)}
}
if tr := sc.retryThrottling.TokenRatio; tr <= 0 {
- return nil, fmt.Errorf("invalid retry throttling config: tokenRatio (%v) may not be negative", tr)
+ return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: tokenRatio (%v) may not be negative", tr)}
}
}
- return &sc, nil
+ return &serviceconfig.ParseResult{Config: &sc}
}
func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) {
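With the added validation, a loadBalancingConfig whose listed policies are all unregistered now fails parsing instead of being silently dropped. For reference, a config this parser accepts looks roughly like the JSON below, passed here via WithDefaultServiceConfig. The target and service name are illustrative, and retry behaviour may additionally be gated by an environment flag in this release:

package main

import (
	"log"

	"google.golang.org/grpc"
)

// serviceConfig uses one policy that is registered by default (round_robin)
// plus a per-method timeout and retry policy matching the jsonSC schema above.
const serviceConfig = `{
  "loadBalancingConfig": [ {"round_robin": {}} ],
  "methodConfig": [{
    "name": [{"service": "echo.Echo"}],
    "timeout": "1s",
    "retryPolicy": {
      "maxAttempts": 3,
      "initialBackoff": "0.1s",
      "maxBackoff": "1s",
      "backoffMultiplier": 2,
      "retryableStatusCodes": ["UNAVAILABLE"]
    }
  }]
}`

func main() {
	// The default service config applies only when the resolver does not
	// supply one of its own.
	conn, err := grpc.Dial("dns:///example.com:443",
		grpc.WithInsecure(),
		grpc.WithDefaultServiceConfig(serviceConfig),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}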
diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
index 53b27875a..187c30442 100644
--- a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
+++ b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
@@ -22,27 +22,20 @@
// This package is EXPERIMENTAL.
package serviceconfig
-import (
- "google.golang.org/grpc/internal"
-)
-
// Config represents an opaque data structure holding a service config.
type Config interface {
- isConfig()
+ isServiceConfig()
}
// LoadBalancingConfig represents an opaque data structure holding a load
-// balancer config.
+// balancing config.
type LoadBalancingConfig interface {
isLoadBalancingConfig()
}
-// Parse parses the JSON service config provided into an internal form or
-// returns an error if the config is invalid.
-func Parse(ServiceConfigJSON string) (Config, error) {
- c, err := internal.ParseServiceConfig(ServiceConfigJSON)
- if err != nil {
- return nil, err
- }
- return c.(Config), err
+// ParseResult contains a service config or an error. Exactly one must be
+// non-nil.
+type ParseResult struct {
+ Config Config
+ Err error
}
diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go
index f3f593c84..9e22c393f 100644
--- a/vendor/google.golang.org/grpc/stats/stats.go
+++ b/vendor/google.golang.org/grpc/stats/stats.go
@@ -91,6 +91,8 @@ type InHeader struct {
LocalAddr net.Addr
// Compression is the compression algorithm used for the RPC.
Compression string
+ // Header contains the header metadata received.
+ Header metadata.MD
}
// IsClient indicates if the stats information is from client side.
@@ -104,6 +106,9 @@ type InTrailer struct {
Client bool
// WireLength is the wire length of trailer.
WireLength int
+ // Trailer contains the trailer metadata received from the server. This
+ // field is only valid if this InTrailer is from the client side.
+ Trailer metadata.MD
}
// IsClient indicates if the stats information is from client side.
@@ -146,6 +151,8 @@ type OutHeader struct {
LocalAddr net.Addr
// Compression is the compression algorithm used for the RPC.
Compression string
+ // Header contains the header metadata sent.
+ Header metadata.MD
}
// IsClient indicates if this stats information is from client side.
@@ -159,6 +166,9 @@ type OutTrailer struct {
Client bool
// WireLength is the wire length of trailer.
WireLength int
+ // Trailer contains the trailer metadata sent to the client. This
+ // field is only valid if this OutTrailer is from the server side.
+ Trailer metadata.MD
}
// IsClient indicates if this stats information is from client side.
@@ -176,6 +186,7 @@ type End struct {
EndTime time.Time
// Trailer contains the trailer metadata received from the server. This
// field is only valid if this End is from the client side.
+ // Deprecated: use Trailer in InTrailer instead.
Trailer metadata.MD
// Error is the error the RPC ended with. It is an error generated from
// status.Status and can be converted back to status.Status using
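Because InHeader/InTrailer (and their Out counterparts) now carry the metadata itself, a stats handler can read headers and trailers directly from the event instead of recovering them from the context. A hedged sketch; headerLogger is a made-up name:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

// headerLogger is an illustrative stats.Handler that uses the new metadata
// fields on InHeader and InTrailer added in this update.
type headerLogger struct{}

func (headerLogger) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { return ctx }

func (headerLogger) HandleRPC(_ context.Context, s stats.RPCStats) {
	switch ev := s.(type) {
	case *stats.InHeader:
		log.Printf("received header metadata: %v", ev.Header)
	case *stats.InTrailer:
		log.Printf("received trailer metadata: %v", ev.Trailer)
	}
}

func (headerLogger) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }

func (headerLogger) HandleConn(context.Context, stats.ConnStats) {}

func main() {
	// Attach the handler on the server side; grpc.WithStatsHandler does the
	// same for clients.
	_ = grpc.NewServer(grpc.StatsHandler(headerLogger{}))
}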
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index 134a624a1..bb99940e3 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -488,7 +488,7 @@ func (cs *clientStream) shouldRetry(err error) error {
pushback := 0
hasPushback := false
if cs.attempt.s != nil {
- if to, toErr := cs.attempt.s.TrailersOnly(); toErr != nil || !to {
+ if !cs.attempt.s.TrailersOnly() {
return err
}
diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go
index 0a57b9994..07a2d26b3 100644
--- a/vendor/google.golang.org/grpc/trace.go
+++ b/vendor/google.golang.org/grpc/trace.go
@@ -41,9 +41,6 @@ func methodFamily(m string) string {
if i := strings.Index(m, "/"); i >= 0 {
m = m[:i] // remove everything from second slash
}
- if i := strings.LastIndex(m, "."); i >= 0 {
- m = m[i+1:] // cut down to last dotted component
- }
return m
}
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index 588850563..1a831b159 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
-const Version = "1.23.1"
+const Version = "1.27.1"